// Copyright 2019 Intel Corporation. All Rights Reserved.
// Copyright 2019-2021 Alibaba Cloud. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0

use std::error;
use std::fs::File;
use std::io;
use std::os::unix::io::AsRawFd;
use std::sync::Arc;
use std::thread;

use log::error;

use vhost::vhost_user::message::{
    VhostUserConfigFlags, VhostUserMemoryRegion, VhostUserProtocolFeatures,
    VhostUserSingleMemoryRegion, VhostUserVirtioFeatures, VhostUserVringAddrFlags,
    VhostUserVringState,
};
use vhost::vhost_user::{
    Error as VhostUserError, Result as VhostUserResult, Slave, VhostUserSlaveReqHandlerMut,
};
use virtio_bindings::bindings::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use virtio_queue::{Error as VirtQueError, QueueT};
use vm_memory::bitmap::Bitmap;
use vm_memory::mmap::NewBitmap;
use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemoryMmap, GuestRegionMmap};
use vmm_sys_util::epoll::EventSet;

use super::backend::VhostUserBackend;
use super::event_loop::VringEpollHandler;
use super::event_loop::{VringEpollError, VringEpollResult};
use super::vring::VringT;
use super::GM;

const MAX_MEM_SLOTS: u64 = 32;

#[derive(Debug)]
/// Errors related to vhost-user handler.
pub enum VhostUserHandlerError {
    /// Failed to create a `Vring`.
    CreateVring(VirtQueError),
    /// Failed to create vring worker.
    CreateEpollHandler(VringEpollError),
    /// Failed to spawn vring worker.
    SpawnVringWorker(io::Error),
    /// Could not find the mapping from memory regions.
    MissingMemoryMapping,
}

impl std::fmt::Display for VhostUserHandlerError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            VhostUserHandlerError::CreateVring(e) => {
                write!(f, "failed to create vring: {}", e)
            }
            VhostUserHandlerError::CreateEpollHandler(e) => {
                write!(f, "failed to create vring epoll handler: {}", e)
            }
            VhostUserHandlerError::SpawnVringWorker(e) => {
                write!(f, "failed spawning the vring worker: {}", e)
            }
            VhostUserHandlerError::MissingMemoryMapping => write!(f, "Missing memory mapping"),
        }
    }
}

impl error::Error for VhostUserHandlerError {}

/// Result of vhost-user handler operations.
pub type VhostUserHandlerResult<T> = std::result::Result<T, VhostUserHandlerError>;

struct AddrMapping {
    vmm_addr: u64,
    size: u64,
    gpa_base: u64,
}

pub struct VhostUserHandler<S, V, B: Bitmap + 'static> {
    backend: S,
    handlers: Vec<Arc<VringEpollHandler<S, V, B>>>,
    owned: bool,
    features_acked: bool,
    acked_features: u64,
    acked_protocol_features: u64,
    num_queues: usize,
    max_queue_size: usize,
    queues_per_thread: Vec<u64>,
    mappings: Vec<AddrMapping>,
    atomic_mem: GM<B>,
    vrings: Vec<V>,
    worker_threads: Vec<thread::JoinHandle<VringEpollResult<()>>>,
}

// Ensure VhostUserHandler: Clone + Send + Sync + 'static.
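// A note on `queues_per_thread` (illustrative values): each entry is a bitmask of the
// queue indices served by one worker thread. With `queues_per_thread = [0b0101, 0b1010]`,
// thread 0 handles queues 0 and 2 while thread 1 handles queues 1 and 3. The epoll token
// a queue is registered under is its position among its thread's queues, computed below
// as `queues_mask.count_ones() - (queues_mask >> index).count_ones()`, e.g. 2 - 1 = 1 for
// queue 2 in this example.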
impl<S, V, B> VhostUserHandler<S, V, B>
where
    S: VhostUserBackend<V, B> + Clone + 'static,
    V: VringT<GM<B>> + Clone + Send + Sync + 'static,
    B: Bitmap + Clone + Send + Sync + 'static,
{
    pub(crate) fn new(backend: S, atomic_mem: GM<B>) -> VhostUserHandlerResult<Self> {
        let num_queues = backend.num_queues();
        let max_queue_size = backend.max_queue_size();
        let queues_per_thread = backend.queues_per_thread();

        let mut vrings = Vec::new();
        for _ in 0..num_queues {
            let vring = V::new(atomic_mem.clone(), max_queue_size as u16)
                .map_err(VhostUserHandlerError::CreateVring)?;
            vrings.push(vring);
        }

        let mut handlers = Vec::new();
        let mut worker_threads = Vec::new();
        for (thread_id, queues_mask) in queues_per_thread.iter().enumerate() {
            let mut thread_vrings = Vec::new();
            for (index, vring) in vrings.iter().enumerate() {
                if (queues_mask >> index) & 1u64 == 1u64 {
                    thread_vrings.push(vring.clone());
                }
            }

            let handler = Arc::new(
                VringEpollHandler::new(backend.clone(), thread_vrings, thread_id)
                    .map_err(VhostUserHandlerError::CreateEpollHandler)?,
            );
            let handler2 = handler.clone();
            let worker_thread = thread::Builder::new()
                .name("vring_worker".to_string())
                .spawn(move || handler2.run())
                .map_err(VhostUserHandlerError::SpawnVringWorker)?;

            handlers.push(handler);
            worker_threads.push(worker_thread);
        }

        Ok(VhostUserHandler {
            backend,
            handlers,
            owned: false,
            features_acked: false,
            acked_features: 0,
            acked_protocol_features: 0,
            num_queues,
            max_queue_size,
            queues_per_thread,
            mappings: Vec::new(),
            atomic_mem,
            vrings,
            worker_threads,
        })
    }
}

impl<S, V, B: Bitmap> VhostUserHandler<S, V, B> {
    pub(crate) fn send_exit_event(&self) {
        for handler in self.handlers.iter() {
            handler.send_exit_event();
        }
    }

    fn vmm_va_to_gpa(&self, vmm_va: u64) -> VhostUserHandlerResult<u64> {
        for mapping in self.mappings.iter() {
            if vmm_va >= mapping.vmm_addr && vmm_va < mapping.vmm_addr + mapping.size {
                return Ok(vmm_va - mapping.vmm_addr + mapping.gpa_base);
            }
        }

        Err(VhostUserHandlerError::MissingMemoryMapping)
    }
}

impl<S, V, B> VhostUserHandler<S, V, B>
where
    S: VhostUserBackend<V, B>,
    V: VringT<GM<B>>,
    B: Bitmap,
{
    pub(crate) fn get_epoll_handlers(&self) -> Vec<Arc<VringEpollHandler<S, V, B>>> {
        self.handlers.clone()
    }

    fn vring_needs_init(&self, vring: &V) -> bool {
        let vring_state = vring.get_ref();

        // If the vring wasn't initialized and we already have an EventFd for
        // VRING_KICK, initialize it now.
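        // Both the SET_VRING_KICK and SET_VRING_CALL handlers call this, so the vring
        // gets set up as soon as the kick eventfd is available; `initialize_vring()`
        // marks the queue ready, which keeps it from being initialized twice.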
        !vring_state.get_queue().ready() && vring_state.get_kick().is_some()
    }

    fn initialize_vring(&self, vring: &V, index: u8) -> VhostUserResult<()> {
        assert!(vring.get_ref().get_kick().is_some());

        if let Some(fd) = vring.get_ref().get_kick() {
            for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
                let shifted_queues_mask = queues_mask >> index;
                if shifted_queues_mask & 1u64 == 1u64 {
                    let evt_idx = queues_mask.count_ones() - shifted_queues_mask.count_ones();
                    self.handlers[thread_index]
                        .register_event(fd.as_raw_fd(), EventSet::IN, u64::from(evt_idx))
                        .map_err(VhostUserError::ReqHandlerError)?;
                    break;
                }
            }
        }

        self.vrings[index as usize].set_queue_ready(true);

        Ok(())
    }

    /// Helper to check whether a virtio feature has been acknowledged.
    fn check_feature(&self, feat: VhostUserVirtioFeatures) -> VhostUserResult<()> {
        if self.acked_features & feat.bits() != 0 {
            Ok(())
        } else {
            Err(VhostUserError::InactiveFeature(feat))
        }
    }
}

impl<S, V, B> VhostUserSlaveReqHandlerMut for VhostUserHandler<S, V, B>
where
    S: VhostUserBackend<V, B>,
    V: VringT<GM<B>>,
    B: NewBitmap + Clone,
{
    fn set_owner(&mut self) -> VhostUserResult<()> {
        if self.owned {
            return Err(VhostUserError::InvalidOperation("already claimed"));
        }
        self.owned = true;
        Ok(())
    }

    fn reset_owner(&mut self) -> VhostUserResult<()> {
        self.owned = false;
        self.features_acked = false;
        self.acked_features = 0;
        self.acked_protocol_features = 0;
        Ok(())
    }

    fn get_features(&mut self) -> VhostUserResult<u64> {
        Ok(self.backend.features())
    }

    fn set_features(&mut self, features: u64) -> VhostUserResult<()> {
        if (features & !self.backend.features()) != 0 {
            return Err(VhostUserError::InvalidParam);
        }

        self.acked_features = features;
        self.features_acked = true;

        // If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
        // the ring is initialized in an enabled state.
        // If VHOST_USER_F_PROTOCOL_FEATURES has been negotiated,
        // the ring is initialized in a disabled state. The client must not
        // pass data to/from the backend until the ring is enabled by
        // VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has
        // been disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
        let vring_enabled =
            self.acked_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0;
        for vring in self.vrings.iter_mut() {
            vring.set_enabled(vring_enabled);
        }

        self.backend.acked_features(self.acked_features);

        Ok(())
    }

    fn set_mem_table(
        &mut self,
        ctx: &[VhostUserMemoryRegion],
        files: Vec<File>,
    ) -> VhostUserResult<()> {
        // We need to create a list of mapped regions and address mappings from the
        // `VhostUserMemoryRegion`s that we get from the caller.
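        // Each region is tracked twice: as a `GuestRegionMmap` inserted into the guest
        // memory map, and as an `AddrMapping` so that `vmm_va_to_gpa()` can later translate
        // the vring addresses received in SET_VRING_ADDR from VMM virtual addresses to
        // guest physical addresses.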
        let mut regions = Vec::new();
        let mut mappings: Vec<AddrMapping> = Vec::new();

        for (region, file) in ctx.iter().zip(files) {
            regions.push(
                GuestRegionMmap::new(
                    region.mmap_region(file)?,
                    GuestAddress(region.guest_phys_addr),
                )
                .map_err(|e| {
                    VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
                })?,
            );
            mappings.push(AddrMapping {
                vmm_addr: region.user_addr,
                size: region.memory_size,
                gpa_base: region.guest_phys_addr,
            });
        }

        let mem = GuestMemoryMmap::from_regions(regions).map_err(|e| {
            VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
        })?;

        // Updating the inner GuestMemory object here will cause all our vrings to
        // see the new one the next time they call `atomic_mem.memory()`.
        self.atomic_mem.lock().unwrap().replace(mem);

        self.backend
            .update_memory(self.atomic_mem.clone())
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;
        self.mappings = mappings;

        Ok(())
    }

    fn set_vring_num(&mut self, index: u32, num: u32) -> VhostUserResult<()> {
        if index as usize >= self.num_queues || num == 0 || num as usize > self.max_queue_size {
            return Err(VhostUserError::InvalidParam);
        }
        self.vrings[index as usize].set_queue_size(num as u16);
        Ok(())
    }

    fn set_vring_addr(
        &mut self,
        index: u32,
        _flags: VhostUserVringAddrFlags,
        descriptor: u64,
        used: u64,
        available: u64,
        _log: u64,
    ) -> VhostUserResult<()> {
        if index as usize >= self.num_queues {
            return Err(VhostUserError::InvalidParam);
        }

        if !self.mappings.is_empty() {
            let desc_table = self.vmm_va_to_gpa(descriptor).map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;
            let avail_ring = self.vmm_va_to_gpa(available).map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;
            let used_ring = self.vmm_va_to_gpa(used).map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;
            self.vrings[index as usize]
                .set_queue_info(desc_table, avail_ring, used_ring)
                .map_err(|_| VhostUserError::InvalidParam)?;

            // SET_VRING_BASE will only restore the 'avail' index; however, after the guest
            // driver changes, for instance after a reboot, the 'used' index should be reset
            // to 0.
            //
            // So let's fetch the used index from the vring as set by the guest here, to keep
            // compatibility with QEMU's vhost-user library, just in case any implementation
            // expects the 'used' index to be set when receiving a SET_VRING_ADDR message.
            //
            // Note: I'm not sure why QEMU's vhost-user library sets the 'used' index here,
            // _probably_ to make sure that the VQ is already configured. A better solution
            // would be to receive the 'used' index in SET_VRING_BASE, as is done when using
            // packed VQs.
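            // Read the 'used' index the guest driver wrote to guest memory and prime the
            // vring's internal next-used counter with it.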
            let idx = self.vrings[index as usize]
                .queue_used_idx()
                .map_err(|_| VhostUserError::SlaveInternalError)?;
            self.vrings[index as usize].set_queue_next_used(idx);

            Ok(())
        } else {
            Err(VhostUserError::InvalidParam)
        }
    }

    fn set_vring_base(&mut self, index: u32, base: u32) -> VhostUserResult<()> {
        let event_idx: bool = (self.acked_features & (1 << VIRTIO_RING_F_EVENT_IDX)) != 0;

        self.vrings[index as usize].set_queue_next_avail(base as u16);
        self.vrings[index as usize].set_queue_event_idx(event_idx);
        self.backend.set_event_idx(event_idx);

        Ok(())
    }

    fn get_vring_base(&mut self, index: u32) -> VhostUserResult<VhostUserVringState> {
        if index as usize >= self.num_queues {
            return Err(VhostUserError::InvalidParam);
        }

        // Quote from vhost-user specification:
        // Client must start ring upon receiving a kick (that is, detecting
        // that file descriptor is readable) on the descriptor specified by
        // VHOST_USER_SET_VRING_KICK, and stop ring upon receiving
        // VHOST_USER_GET_VRING_BASE.
        self.vrings[index as usize].set_queue_ready(false);

        if let Some(fd) = self.vrings[index as usize].get_ref().get_kick() {
            for (thread_index, queues_mask) in self.queues_per_thread.iter().enumerate() {
                let shifted_queues_mask = queues_mask >> index;
                if shifted_queues_mask & 1u64 == 1u64 {
                    let evt_idx = queues_mask.count_ones() - shifted_queues_mask.count_ones();
                    self.handlers[thread_index]
                        .unregister_event(fd.as_raw_fd(), EventSet::IN, u64::from(evt_idx))
                        .map_err(VhostUserError::ReqHandlerError)?;
                    break;
                }
            }
        }

        let next_avail = self.vrings[index as usize].queue_next_avail();

        self.vrings[index as usize].set_kick(None);
        self.vrings[index as usize].set_call(None);

        Ok(VhostUserVringState::new(index, u32::from(next_avail)))
    }

    fn set_vring_kick(&mut self, index: u8, file: Option<File>) -> VhostUserResult<()> {
        if index as usize >= self.num_queues {
            return Err(VhostUserError::InvalidParam);
        }

        // SAFETY: EventFd requires that it has sole ownership of its fd. So
        // does File, so this is safe.
        // Ideally, we'd have a generic way to refer to a uniquely-owned fd,
        // such as that proposed by Rust RFC #3128.
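        // As hinted above, the crate's `VringT::set_kick()` implementations take ownership
        // of the descriptor and wrap it in an `EventFd`, which is why sole ownership of the
        // fd matters here.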
        self.vrings[index as usize].set_kick(file);

        if self.vring_needs_init(&self.vrings[index as usize]) {
            self.initialize_vring(&self.vrings[index as usize], index)?;
        }

        Ok(())
    }

    fn set_vring_call(&mut self, index: u8, file: Option<File>) -> VhostUserResult<()> {
        if index as usize >= self.num_queues {
            return Err(VhostUserError::InvalidParam);
        }

        self.vrings[index as usize].set_call(file);

        if self.vring_needs_init(&self.vrings[index as usize]) {
            self.initialize_vring(&self.vrings[index as usize], index)?;
        }

        Ok(())
    }

    fn set_vring_err(&mut self, index: u8, file: Option<File>) -> VhostUserResult<()> {
        if index as usize >= self.num_queues {
            return Err(VhostUserError::InvalidParam);
        }

        self.vrings[index as usize].set_err(file);

        Ok(())
    }

    fn get_protocol_features(&mut self) -> VhostUserResult<VhostUserProtocolFeatures> {
        Ok(self.backend.protocol_features())
    }

    fn set_protocol_features(&mut self, features: u64) -> VhostUserResult<()> {
        // Note: slave that reported VHOST_USER_F_PROTOCOL_FEATURES must
        // support this message even before VHOST_USER_SET_FEATURES was
        // called.
        self.acked_protocol_features = features;
        Ok(())
    }

    fn get_queue_num(&mut self) -> VhostUserResult<u64> {
        Ok(self.num_queues as u64)
    }

    fn set_vring_enable(&mut self, index: u32, enable: bool) -> VhostUserResult<()> {
        // This request should be handled only when VHOST_USER_F_PROTOCOL_FEATURES
        // has been negotiated.
        self.check_feature(VhostUserVirtioFeatures::PROTOCOL_FEATURES)?;

        if index as usize >= self.num_queues {
            return Err(VhostUserError::InvalidParam);
        }

        // Slave must not pass data to/from the backend until ring is
        // enabled by VHOST_USER_SET_VRING_ENABLE with parameter 1,
        // or after it has been disabled by VHOST_USER_SET_VRING_ENABLE
        // with parameter 0.
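        // The per-vring flag recorded here overrides the default computed in `set_features()`
        // (rings start enabled only when VHOST_USER_F_PROTOCOL_FEATURES was not negotiated).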
        self.vrings[index as usize].set_enabled(enable);

        Ok(())
    }

    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        _flags: VhostUserConfigFlags,
    ) -> VhostUserResult<Vec<u8>> {
        Ok(self.backend.get_config(offset, size))
    }

    fn set_config(
        &mut self,
        offset: u32,
        buf: &[u8],
        _flags: VhostUserConfigFlags,
    ) -> VhostUserResult<()> {
        self.backend
            .set_config(offset, buf)
            .map_err(VhostUserError::ReqHandlerError)
    }

    fn set_slave_req_fd(&mut self, slave: Slave) {
        if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() != 0 {
            slave.set_reply_ack_flag(true);
        }

        self.backend.set_slave_req_fd(slave);
    }

    fn get_inflight_fd(
        &mut self,
        _inflight: &vhost::vhost_user::message::VhostUserInflight,
    ) -> VhostUserResult<(vhost::vhost_user::message::VhostUserInflight, File)> {
        // Assume the backend hasn't negotiated the inflight feature; it
        // wouldn't be correct for the backend to do so, as we don't (yet)
        // provide a way for it to handle such requests.
        Err(VhostUserError::InvalidOperation("not supported"))
    }

    fn set_inflight_fd(
        &mut self,
        _inflight: &vhost::vhost_user::message::VhostUserInflight,
        _file: File,
    ) -> VhostUserResult<()> {
        Err(VhostUserError::InvalidOperation("not supported"))
    }

    fn get_max_mem_slots(&mut self) -> VhostUserResult<u64> {
        Ok(MAX_MEM_SLOTS)
    }

    fn add_mem_region(
        &mut self,
        region: &VhostUserSingleMemoryRegion,
        file: File,
    ) -> VhostUserResult<()> {
        let guest_region = Arc::new(
            GuestRegionMmap::new(
                region.mmap_region(file)?,
                GuestAddress(region.guest_phys_addr),
            )
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?,
        );

        let mem = self
            .atomic_mem
            .memory()
            .insert_region(guest_region)
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;

        self.atomic_mem.lock().unwrap().replace(mem);

        self.backend
            .update_memory(self.atomic_mem.clone())
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;

        self.mappings.push(AddrMapping {
            vmm_addr: region.user_addr,
            size: region.memory_size,
            gpa_base: region.guest_phys_addr,
        });

        Ok(())
    }

    fn remove_mem_region(&mut self, region: &VhostUserSingleMemoryRegion) -> VhostUserResult<()> {
        let (mem, _) = self
            .atomic_mem
            .memory()
            .remove_region(GuestAddress(region.guest_phys_addr), region.memory_size)
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;

        self.atomic_mem.lock().unwrap().replace(mem);

        self.backend
            .update_memory(self.atomic_mem.clone())
            .map_err(|e| {
                VhostUserError::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e))
            })?;

        self.mappings
            .retain(|mapping| mapping.gpa_base != region.guest_phys_addr);

        Ok(())
    }
}

impl<S, V, B: Bitmap> Drop for VhostUserHandler<S, V, B> {
    fn drop(&mut self) {
        // Signal all worker threads to exit.
        self.send_exit_event();

        for thread in self.worker_threads.drain(..) {
            if let Err(e) = thread.join() {
                error!("Error in vring worker: {:?}", e);
            }
        }
    }
}
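// Worked example (illustrative values only) of the translation performed by
// `vmm_va_to_gpa()` above: with a single `AddrMapping { vmm_addr: 0x7f00_0000_0000,
// size: 0x4000_0000, gpa_base: 0 }`, a vring address of 0x7f00_0000_1000 received from
// the VMM lies inside the mapping and translates to guest physical address 0x1000
// (0x7f00_0000_1000 - 0x7f00_0000_0000 + 0); an address outside every mapping yields
// `VhostUserHandlerError::MissingMemoryMapping`.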