1 #![deny(unsafe_op_in_unsafe_fn)]
2 
3 use crate::hal::{BufferDirection, Dma, Hal, PhysAddr};
4 use crate::transport::Transport;
5 use crate::{align_up, nonnull_slice_from_raw_parts, pages, Error, Result, PAGE_SIZE};
6 #[cfg(feature = "alloc")]
7 use alloc::boxed::Box;
8 use bitflags::bitflags;
9 #[cfg(test)]
10 use core::cmp::min;
11 use core::convert::TryInto;
12 use core::hint::spin_loop;
13 use core::mem::{size_of, take};
14 #[cfg(test)]
15 use core::ptr;
16 use core::ptr::NonNull;
17 use core::sync::atomic::{fence, AtomicU16, Ordering};
18 use zerocopy::{AsBytes, FromBytes, FromZeroes};
19 
20 /// The mechanism for bulk data transport on virtio devices.
21 ///
22 /// Each device can have zero or more virtqueues.
23 ///
24 /// * `SIZE`: The size of the queue. This is both the number of descriptors, and the number of slots
25 ///   in the available and used rings. It must be a power of 2 and fit in a [`u16`].
26 #[derive(Debug)]
27 pub struct VirtQueue<H: Hal, const SIZE: usize> {
28     /// DMA guard
29     layout: VirtQueueLayout<H>,
30     /// Descriptor table
31     ///
32     /// The device may be able to modify this, even though it's not supposed to, so we shouldn't
33     /// trust values read back from it. Use `desc_shadow` instead to keep track of what we wrote to
34     /// it.
35     desc: NonNull<[Descriptor]>,
36     /// Available ring
37     ///
38     /// The device may be able to modify this, even though it's not supposed to, so we shouldn't
39     /// trust values read back from it. The only field we need to read currently is `idx`, so we
40     /// have `avail_idx` below to use instead.
41     avail: NonNull<AvailRing<SIZE>>,
42     /// Used ring
43     used: NonNull<UsedRing<SIZE>>,
44 
45     /// The index of the queue.
46     queue_idx: u16,
47     /// The number of descriptors currently in use.
48     num_used: u16,
49     /// The head desc index of the free list.
50     free_head: u16,
51     /// Our trusted copy of `desc` that the device can't access.
52     desc_shadow: [Descriptor; SIZE],
53     /// Our trusted copy of `avail.idx`.
54     avail_idx: u16,
    /// Index of the next entry we expect to pop from the used ring.
55     last_used_idx: u16,
56     /// Whether the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
57     event_idx: bool,
58     #[cfg(feature = "alloc")]
59     indirect: bool,
60     #[cfg(feature = "alloc")]
61     indirect_lists: [Option<NonNull<[Descriptor]>>; SIZE],
62 }
63 
64 impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
65     const SIZE_OK: () = assert!(SIZE.is_power_of_two() && SIZE <= u16::MAX as usize);
66 
67     /// Creates a new VirtQueue.
68     ///
69     /// * `indirect`: Whether to use indirect descriptors. This should be set if the
70     ///   `VIRTIO_F_INDIRECT_DESC` feature has been negotiated with the device.
71     /// * `event_idx`: Whether to use the `used_event` and `avail_event` fields for notification
72     ///   suppression. This should be set if the `VIRTIO_F_EVENT_IDX` feature has been negotiated
73     ///   with the device.
74     pub fn new<T: Transport>(
75         transport: &mut T,
76         idx: u16,
77         indirect: bool,
78         event_idx: bool,
79     ) -> Result<Self> {
80         #[allow(clippy::let_unit_value)]
81         let _ = Self::SIZE_OK;
82 
83         if transport.queue_used(idx) {
84             return Err(Error::AlreadyUsed);
85         }
86         if transport.max_queue_size(idx) < SIZE as u32 {
87             return Err(Error::InvalidParam);
88         }
89         let size = SIZE as u16;
90 
91         let layout = if transport.requires_legacy_layout() {
92             VirtQueueLayout::allocate_legacy(size)?
93         } else {
94             VirtQueueLayout::allocate_flexible(size)?
95         };
96 
97         transport.queue_set(
98             idx,
99             size.into(),
100             layout.descriptors_paddr(),
101             layout.driver_area_paddr(),
102             layout.device_area_paddr(),
103         );
104 
105         let desc =
106             nonnull_slice_from_raw_parts(layout.descriptors_vaddr().cast::<Descriptor>(), SIZE);
107         let avail = layout.avail_vaddr().cast();
108         let used = layout.used_vaddr().cast();
109 
110         let mut desc_shadow: [Descriptor; SIZE] = FromZeroes::new_zeroed();
111         // Link descriptors together.
112         for i in 0..(size - 1) {
113             desc_shadow[i as usize].next = i + 1;
114             // Safe because `desc` is properly aligned, dereferenceable, initialised, and the device
115             // won't access the descriptors for the duration of this unsafe block.
116             unsafe {
117                 (*desc.as_ptr())[i as usize].next = i + 1;
118             }
119         }
120 
121         #[cfg(feature = "alloc")]
122         const NONE: Option<NonNull<[Descriptor]>> = None;
123         Ok(VirtQueue {
124             layout,
125             desc,
126             avail,
127             used,
128             queue_idx: idx,
129             num_used: 0,
130             free_head: 0,
131             desc_shadow,
132             avail_idx: 0,
133             last_used_idx: 0,
134             event_idx,
135             #[cfg(feature = "alloc")]
136             indirect,
137             #[cfg(feature = "alloc")]
138             indirect_lists: [NONE; SIZE],
139         })
140     }
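    // A minimal construction sketch (illustrative only, not part of the crate): `MyHal` and
    // `my_transport` are hypothetical placeholders, and `SIZE` must be a power of two no larger
    // than the device's reported maximum queue size. `indirect` and `event_idx` should reflect
    // whether `VIRTIO_F_INDIRECT_DESC` and `VIRTIO_F_EVENT_IDX` were actually negotiated.
    //
    //     let mut queue =
    //         VirtQueue::<MyHal, 16>::new(&mut my_transport, 0, indirect, event_idx)?;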
141 
142     /// Add buffers to the virtqueue, return a token.
143     ///
144     /// The buffers must not be empty.
145     ///
146     /// Ref: linux virtio_ring.c virtqueue_add
147     ///
148     /// # Safety
149     ///
150     /// The input and output buffers must remain valid and not be accessed until a call to
151     /// `pop_used` with the returned token succeeds.
152     pub unsafe fn add<'a, 'b>(
153         &mut self,
154         inputs: &'a [&'b [u8]],
155         outputs: &'a mut [&'b mut [u8]],
156     ) -> Result<u16> {
157         if inputs.is_empty() && outputs.is_empty() {
158             return Err(Error::InvalidParam);
159         }
160         let descriptors_needed = inputs.len() + outputs.len();
161         // Only consider indirect descriptors if the alloc feature is enabled, as they require
162         // allocation.
163         #[cfg(feature = "alloc")]
164         if self.num_used as usize + 1 > SIZE
165             || descriptors_needed > SIZE
166             || (!self.indirect && self.num_used as usize + descriptors_needed > SIZE)
167         {
168             return Err(Error::QueueFull);
169         }
170         #[cfg(not(feature = "alloc"))]
171         if self.num_used as usize + descriptors_needed > SIZE {
172             return Err(Error::QueueFull);
173         }
174 
175         #[cfg(feature = "alloc")]
176         let head = if self.indirect && descriptors_needed > 1 {
177             self.add_indirect(inputs, outputs)
178         } else {
179             self.add_direct(inputs, outputs)
180         };
181         #[cfg(not(feature = "alloc"))]
182         let head = self.add_direct(inputs, outputs);
183 
184         let avail_slot = self.avail_idx & (SIZE as u16 - 1);
185         // Safe because self.avail is properly aligned, dereferenceable and initialised.
186         unsafe {
187             (*self.avail.as_ptr()).ring[avail_slot as usize] = head;
188         }
189 
190         // Write barrier so that device sees changes to descriptor table and available ring before
191         // change to available index.
192         fence(Ordering::SeqCst);
193 
194         // Advance the available ring index.
195         self.avail_idx = self.avail_idx.wrapping_add(1);
196         // Safe because self.avail is properly aligned, dereferenceable and initialised.
197         unsafe {
198             (*self.avail.as_ptr())
199                 .idx
200                 .store(self.avail_idx, Ordering::Release);
201         }
202 
203         Ok(head)
204     }
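    // Sketch of the manual flow around `add` (illustrative only; `request`, `response`,
    // `transport` and `queue_index` are placeholders, and error handling is reduced to `?`).
    // The safety contract above means the buffers must stay alive and untouched until
    // `pop_used` returns them for the same token:
    //
    //     let token = unsafe { queue.add(&[&request], &mut [&mut response]) }?;
    //     if queue.should_notify() {
    //         transport.notify(queue_index);
    //     }
    //     while !queue.can_pop() {
    //         core::hint::spin_loop();
    //     }
    //     let len = unsafe { queue.pop_used(token, &[&request], &mut [&mut response]) }?;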
205 
206     fn add_direct<'a, 'b>(
207         &mut self,
208         inputs: &'a [&'b [u8]],
209         outputs: &'a mut [&'b mut [u8]],
210     ) -> u16 {
211         // allocate descriptors from free list
212         let head = self.free_head;
213         let mut last = self.free_head;
214 
215         for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
216             assert_ne!(buffer.len(), 0);
217 
218             // Write to desc_shadow then copy.
219             let desc = &mut self.desc_shadow[usize::from(self.free_head)];
220             // Safe because our caller promises that the buffers live at least until `pop_used`
221             // returns them.
222             unsafe {
223                 desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
224             }
225             last = self.free_head;
226             self.free_head = desc.next;
227 
228             self.write_desc(last);
229         }
230 
231         // Clear the NEXT flag on the last descriptor to terminate the chain.
232         self.desc_shadow[usize::from(last)]
233             .flags
234             .remove(DescFlags::NEXT);
235         self.write_desc(last);
236 
237         self.num_used += (inputs.len() + outputs.len()) as u16;
238 
239         head
240     }
241 
242     #[cfg(feature = "alloc")]
243     fn add_indirect<'a, 'b>(
244         &mut self,
245         inputs: &'a [&'b [u8]],
246         outputs: &'a mut [&'b mut [u8]],
247     ) -> u16 {
248         let head = self.free_head;
249 
250         // Allocate and fill in indirect descriptor list.
251         let mut indirect_list = Descriptor::new_box_slice_zeroed(inputs.len() + outputs.len());
252         for (i, (buffer, direction)) in InputOutputIter::new(inputs, outputs).enumerate() {
253             let desc = &mut indirect_list[i];
254             // Safe because our caller promises that the buffers live at least until `pop_used`
255             // returns them.
256             unsafe {
257                 desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
258             }
259             desc.next = (i + 1) as u16;
260         }
261         indirect_list
262             .last_mut()
263             .unwrap()
264             .flags
265             .remove(DescFlags::NEXT);
266 
267         // Need to store pointer to indirect_list too, because direct_desc.set_buf will only store
268         // the physical DMA address which might be different.
269         assert!(self.indirect_lists[usize::from(head)].is_none());
270         self.indirect_lists[usize::from(head)] = Some(indirect_list.as_mut().into());
271 
272         // Write a descriptor pointing to indirect descriptor list. We use Box::leak to prevent the
273         // indirect list from being freed when this function returns; recycle_descriptors is instead
274         // responsible for freeing the memory after the buffer chain is popped.
275         let direct_desc = &mut self.desc_shadow[usize::from(head)];
276         self.free_head = direct_desc.next;
277         unsafe {
278             direct_desc.set_buf::<H>(
279                 Box::leak(indirect_list).as_bytes().into(),
280                 BufferDirection::DriverToDevice,
281                 DescFlags::INDIRECT,
282             );
283         }
284         self.write_desc(head);
285         self.num_used += 1;
286 
287         head
288     }
289 
290     /// Adds the given buffers to the virtqueue, notifies the device, blocks until the device uses
291     /// them, then pops them.
292     ///
293     /// This assumes that the device isn't processing any other buffers at the same time.
294     ///
295     /// The buffers must not be empty.
296     pub fn add_notify_wait_pop<'a>(
297         &mut self,
298         inputs: &'a [&'a [u8]],
299         outputs: &'a mut [&'a mut [u8]],
300         transport: &mut impl Transport,
301     ) -> Result<u32> {
302         // Safe because we don't return until the same token has been popped, so the buffers remain
303         // valid and are not otherwise accessed until then.
304         let token = unsafe { self.add(inputs, outputs) }?;
305 
306         // Notify the device, unless it has suppressed notifications.
307         if self.should_notify() {
308             transport.notify(self.queue_idx);
309         }
310 
311         // Wait until there is at least one element in the used ring.
312         while !self.can_pop() {
313             spin_loop();
314         }
315 
316         // Safe because these are the same buffers as we passed to `add` above and they are still
317         // valid.
318         unsafe { self.pop_used(token, inputs, outputs) }
319     }
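    // Usage sketch for the convenience path above (illustrative only; `transport` is a
    // placeholder): one device-readable request buffer followed by one device-writable
    // response buffer.
    //
    //     let request = [1u8, 2, 3, 4];
    //     let mut response = [0u8; 64];
    //     let used_len =
    //         queue.add_notify_wait_pop(&[&request], &mut [&mut response], &mut transport)?;
    //     // `used_len` is the length the device reports having used (written).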
320 
321     /// Advise the device whether used buffer notifications are needed.
322     ///
323     /// See Virtio v1.1 2.6.7 Used Buffer Notification Suppression
324     pub fn set_dev_notify(&mut self, enable: bool) {
325         let avail_ring_flags = if enable { 0x0000 } else { 0x0001 };
326         if !self.event_idx {
327             // Safe because self.avail points to a valid, aligned, initialised, dereferenceable, readable
328             // instance of AvailRing.
329             unsafe {
330                 (*self.avail.as_ptr())
331                     .flags
332                     .store(avail_ring_flags, Ordering::Release)
333             }
334         }
335     }
336 
337     /// Returns whether the driver should notify the device after adding a new buffer to the
338     /// virtqueue.
339     ///
340     /// This will be false if the device has suppressed notifications.
341     pub fn should_notify(&self) -> bool {
342         if self.event_idx {
343             // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
344             // instance of UsedRing.
345             let avail_event = unsafe { (*self.used.as_ptr()).avail_event.load(Ordering::Acquire) };
346             self.avail_idx >= avail_event.wrapping_add(1)
347         } else {
348             // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
349             // instance of UsedRing.
350             unsafe { (*self.used.as_ptr()).flags.load(Ordering::Acquire) & 0x0001 == 0 }
351         }
352     }
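    // Worked example of the `VIRTIO_F_EVENT_IDX` branch above: if the device last published
    // `avail_event = 5`, a driver whose `avail_idx` has advanced to 6 computes 6 >= 5 + 1 and
    // notifies, while a driver still at `avail_idx = 5` computes 5 >= 6 and skips the
    // notification. The `wrapping_add` avoids overflow when `avail_event` is `u16::MAX`.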
353 
354     /// Copies the descriptor at the given index from `desc_shadow` to `desc`, so it can be seen by
355     /// the device.
356     fn write_desc(&mut self, index: u16) {
357         let index = usize::from(index);
358         // Safe because self.desc is properly aligned, dereferenceable and initialised, and nothing
359         // else reads or writes the descriptor during this block.
360         unsafe {
361             (*self.desc.as_ptr())[index] = self.desc_shadow[index].clone();
362         }
363     }
364 
365     /// Returns whether there is a used element that can be popped.
366     pub fn can_pop(&self) -> bool {
367         // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
368         // instance of UsedRing.
369         self.last_used_idx != unsafe { (*self.used.as_ptr()).idx.load(Ordering::Acquire) }
370     }
371 
372     /// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
373     /// `None` if the used ring is empty.
374     pub fn peek_used(&self) -> Option<u16> {
375         if self.can_pop() {
376             let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
377             // Safe because self.used points to a valid, aligned, initialised, dereferenceable,
378             // readable instance of UsedRing.
379             Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
380         } else {
381             None
382         }
383     }
384 
385     /// Returns the number of free descriptors.
386     pub fn available_desc(&self) -> usize {
387         #[cfg(feature = "alloc")]
388         if self.indirect {
389             return if usize::from(self.num_used) == SIZE {
390                 0
391             } else {
392                 SIZE
393             };
394         }
395 
396         SIZE - usize::from(self.num_used)
397     }
398 
399     /// Unshares buffers in the list starting at descriptor index `head` and adds them to the free
400     /// list. Unsharing may involve copying data back to the original buffers, so they must be
401     /// passed in too.
402     ///
403     /// This will push all linked descriptors at the front of the free list.
404     ///
405     /// # Safety
406     ///
407     /// The buffers in `inputs` and `outputs` must match the set of buffers originally added to the
408     /// queue by `add`.
409     unsafe fn recycle_descriptors<'a>(
410         &mut self,
411         head: u16,
412         inputs: &'a [&'a [u8]],
413         outputs: &'a mut [&'a mut [u8]],
414     ) {
415         let original_free_head = self.free_head;
416         self.free_head = head;
417 
418         let head_desc = &mut self.desc_shadow[usize::from(head)];
419         if head_desc.flags.contains(DescFlags::INDIRECT) {
420             #[cfg(feature = "alloc")]
421             {
422                 // Find the indirect descriptor list, unshare it and move its descriptor to the free
423                 // list.
424                 let indirect_list = self.indirect_lists[usize::from(head)].take().unwrap();
425                 // SAFETY: We allocated the indirect list in `add_indirect`, and the device has
426                 // finished accessing it by this point.
427                 let mut indirect_list = unsafe { Box::from_raw(indirect_list.as_ptr()) };
428                 let paddr = head_desc.addr;
429                 head_desc.unset_buf();
430                 self.num_used -= 1;
431                 head_desc.next = original_free_head;
432 
433                 unsafe {
434                     H::unshare(
435                         paddr as usize,
436                         indirect_list.as_bytes_mut().into(),
437                         BufferDirection::DriverToDevice,
438                     );
439                 }
440 
441                 // Unshare the buffers in the indirect descriptor list, and free it.
442                 assert_eq!(indirect_list.len(), inputs.len() + outputs.len());
443                 for (i, (buffer, direction)) in InputOutputIter::new(inputs, outputs).enumerate() {
444                     assert_ne!(buffer.len(), 0);
445 
446                     // SAFETY: The caller ensures that the buffer is valid and matches the
447                     // descriptor from which we got `paddr`.
448                     unsafe {
449                         // Unshare the buffer (and perhaps copy its contents back to the original
450                         // buffer).
451                         H::unshare(indirect_list[i].addr as usize, buffer, direction);
452                     }
453                 }
454                 drop(indirect_list);
455             }
456         } else {
457             let mut next = Some(head);
458 
459             for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
460                 assert_ne!(buffer.len(), 0);
461 
462                 let desc_index = next.expect("Descriptor chain was shorter than expected.");
463                 let desc = &mut self.desc_shadow[usize::from(desc_index)];
464 
465                 let paddr = desc.addr;
466                 desc.unset_buf();
467                 self.num_used -= 1;
468                 next = desc.next();
469                 if next.is_none() {
470                     desc.next = original_free_head;
471                 }
472 
473                 self.write_desc(desc_index);
474 
475                 // SAFETY: The caller ensures that the buffer is valid and matches the descriptor
476                 // from which we got `paddr`.
477                 unsafe {
478                     // Unshare the buffer (and perhaps copy its contents back to the original buffer).
479                     H::unshare(paddr as usize, buffer, direction);
480                 }
481             }
482 
483             if next.is_some() {
484                 panic!("Descriptor chain was longer than expected.");
485             }
486         }
487     }
488 
489     /// If the given token is next on the device used queue, pops it and returns the total buffer
490     /// length which was used (written) by the device.
491     ///
492     /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
493     ///
494     /// # Safety
495     ///
496     /// The buffers in `inputs` and `outputs` must match the set of buffers originally added to the
497     /// queue by `add` when it returned the token being passed in here.
498     pub unsafe fn pop_used<'a>(
499         &mut self,
500         token: u16,
501         inputs: &'a [&'a [u8]],
502         outputs: &'a mut [&'a mut [u8]],
503     ) -> Result<u32> {
504         if !self.can_pop() {
505             return Err(Error::NotReady);
506         }
507 
508         // Get the index of the start of the descriptor chain for the next element in the used ring.
509         let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
510         let index;
511         let len;
512         // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
513         // instance of UsedRing.
514         unsafe {
515             index = (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16;
516             len = (*self.used.as_ptr()).ring[last_used_slot as usize].len;
517         }
518 
519         if index != token {
520             // The device used a different descriptor chain to the one we were expecting.
521             return Err(Error::WrongToken);
522         }
523 
524         // Safe because the caller ensures the buffers are valid and match the descriptor.
525         unsafe {
526             self.recycle_descriptors(index, inputs, outputs);
527         }
528         self.last_used_idx = self.last_used_idx.wrapping_add(1);
529 
530         if self.event_idx {
531             unsafe {
532                 (*self.avail.as_ptr())
533                     .used_event
534                     .store(self.last_used_idx, Ordering::Release);
535             }
536         }
537 
538         Ok(len)
539     }
540 }
541 
542 // SAFETY: None of the virt queue resources are tied to a particular thread.
543 unsafe impl<H: Hal, const SIZE: usize> Send for VirtQueue<H, SIZE> {}
544 
545 // SAFETY: A `&VirtQueue` only allows reading from the various pointers it contains, so there is no
546 // data race.
547 unsafe impl<H: Hal, const SIZE: usize> Sync for VirtQueue<H, SIZE> {}
548 
549 /// The inner layout of a VirtQueue.
550 ///
551 /// Ref: 2.6 Split Virtqueues
552 #[derive(Debug)]
553 enum VirtQueueLayout<H: Hal> {
554     Legacy {
555         dma: Dma<H>,
556         avail_offset: usize,
557         used_offset: usize,
558     },
559     Modern {
560         /// The region used for the descriptor area and driver area.
561         driver_to_device_dma: Dma<H>,
562         /// The region used for the device area.
563         device_to_driver_dma: Dma<H>,
564         /// The offset from the start of the `driver_to_device_dma` region to the driver area
565         /// (available ring).
566         avail_offset: usize,
567     },
568 }
569 
570 impl<H: Hal> VirtQueueLayout<H> {
571     /// Allocates a single DMA region containing all parts of the virtqueue, following the layout
572     /// required by legacy interfaces.
573     ///
574     /// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
575     fn allocate_legacy(queue_size: u16) -> Result<Self> {
576         let (desc, avail, used) = queue_part_sizes(queue_size);
577         let size = align_up(desc + avail) + align_up(used);
578         // Allocate contiguous pages.
579         let dma = Dma::new(size / PAGE_SIZE, BufferDirection::Both)?;
580         Ok(Self::Legacy {
581             dma,
582             avail_offset: desc,
583             used_offset: align_up(desc + avail),
584         })
585     }
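    // Worked example for a hypothetical 4-entry queue, assuming the crate's 4 KiB `PAGE_SIZE`:
    // `queue_part_sizes(4)` gives (64, 14, 38), so the available ring starts at offset 64, the
    // used ring starts at the page-aligned offset 4096, and the whole legacy layout occupies
    // 4096 + 4096 = 8192 bytes, i.e. two contiguous pages.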
586 
587     /// Allocates separate DMA regions for the different parts of the virtqueue, as supported by
588     /// non-legacy interfaces.
589     ///
590     /// This is preferred over `allocate_legacy` where possible as it reduces memory fragmentation
591     /// and allows the HAL to know which DMA regions are used in which direction.
592     fn allocate_flexible(queue_size: u16) -> Result<Self> {
593         let (desc, avail, used) = queue_part_sizes(queue_size);
594         let driver_to_device_dma = Dma::new(pages(desc + avail), BufferDirection::DriverToDevice)?;
595         let device_to_driver_dma = Dma::new(pages(used), BufferDirection::DeviceToDriver)?;
596         Ok(Self::Modern {
597             driver_to_device_dma,
598             device_to_driver_dma,
599             avail_offset: desc,
600         })
601     }
602 
603     /// Returns the physical address of the descriptor area.
604     fn descriptors_paddr(&self) -> PhysAddr {
605         match self {
606             Self::Legacy { dma, .. } => dma.paddr(),
607             Self::Modern {
608                 driver_to_device_dma,
609                 ..
610             } => driver_to_device_dma.paddr(),
611         }
612     }
613 
614     /// Returns a pointer to the descriptor table (in the descriptor area).
615     fn descriptors_vaddr(&self) -> NonNull<u8> {
616         match self {
617             Self::Legacy { dma, .. } => dma.vaddr(0),
618             Self::Modern {
619                 driver_to_device_dma,
620                 ..
621             } => driver_to_device_dma.vaddr(0),
622         }
623     }
624 
625     /// Returns the physical address of the driver area.
626     fn driver_area_paddr(&self) -> PhysAddr {
627         match self {
628             Self::Legacy {
629                 dma, avail_offset, ..
630             } => dma.paddr() + avail_offset,
631             Self::Modern {
632                 driver_to_device_dma,
633                 avail_offset,
634                 ..
635             } => driver_to_device_dma.paddr() + avail_offset,
636         }
637     }
638 
639     /// Returns a pointer to the available ring (in the driver area).
640     fn avail_vaddr(&self) -> NonNull<u8> {
641         match self {
642             Self::Legacy {
643                 dma, avail_offset, ..
644             } => dma.vaddr(*avail_offset),
645             Self::Modern {
646                 driver_to_device_dma,
647                 avail_offset,
648                 ..
649             } => driver_to_device_dma.vaddr(*avail_offset),
650         }
651     }
652 
653     /// Returns the physical address of the device area.
654     fn device_area_paddr(&self) -> PhysAddr {
655         match self {
656             Self::Legacy {
657                 used_offset, dma, ..
658             } => dma.paddr() + used_offset,
659             Self::Modern {
660                 device_to_driver_dma,
661                 ..
662             } => device_to_driver_dma.paddr(),
663         }
664     }
665 
666     /// Returns a pointer to the used ring (in the device area).
667     fn used_vaddr(&self) -> NonNull<u8> {
668         match self {
669             Self::Legacy {
670                 dma, used_offset, ..
671             } => dma.vaddr(*used_offset),
672             Self::Modern {
673                 device_to_driver_dma,
674                 ..
675             } => device_to_driver_dma.vaddr(0),
676         }
677     }
678 }
679 
680 /// Returns the size in bytes of the descriptor table, available ring and used ring for a given
681 /// queue size.
682 ///
683 /// Ref: 2.6 Split Virtqueues
684 fn queue_part_sizes(queue_size: u16) -> (usize, usize, usize) {
685     assert!(
686         queue_size.is_power_of_two(),
687         "queue size should be a power of 2"
688     );
689     let queue_size = queue_size as usize;
690     let desc = size_of::<Descriptor>() * queue_size;
691     let avail = size_of::<u16>() * (3 + queue_size);
692     let used = size_of::<u16>() * 3 + size_of::<UsedElem>() * queue_size;
693     (desc, avail, used)
694 }
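// For example, a queue of size 4 needs 4 * 16 = 64 bytes of descriptors (each `Descriptor` is
// 16 bytes), 2 * (3 + 4) = 14 bytes for the available ring (`flags`, `idx` and `used_event`
// plus one u16 slot per entry), and 2 * 3 + 8 * 4 = 38 bytes for the used ring (`flags`, `idx`
// and `avail_event` plus one 8-byte `UsedElem` per entry).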
695 
696 #[repr(C, align(16))]
697 #[derive(AsBytes, Clone, Debug, FromBytes, FromZeroes)]
698 pub(crate) struct Descriptor {
699     addr: u64,
700     len: u32,
701     flags: DescFlags,
702     next: u16,
703 }
704 
705 impl Descriptor {
706     /// Sets the buffer address, length and flags, and shares it with the device.
707     ///
708     /// # Safety
709     ///
710     /// The caller must ensure that the buffer lives at least as long as the descriptor is active.
711     unsafe fn set_buf<H: Hal>(
712         &mut self,
713         buf: NonNull<[u8]>,
714         direction: BufferDirection,
715         extra_flags: DescFlags,
716     ) {
717         // Safe because our caller promises that the buffer is valid.
718         unsafe {
719             self.addr = H::share(buf, direction) as u64;
720         }
721         self.len = buf.len().try_into().unwrap();
722         self.flags = extra_flags
723             | match direction {
724                 BufferDirection::DeviceToDriver => DescFlags::WRITE,
725                 BufferDirection::DriverToDevice => DescFlags::empty(),
726                 BufferDirection::Both => {
727                     panic!("Buffer passed to device should never use BufferDirection::Both.")
728                 }
729             };
730     }
731 
732     /// Sets the buffer address and length to 0.
733     ///
734     /// This must only be called once the device has finished using the descriptor.
735     fn unset_buf(&mut self) {
736         self.addr = 0;
737         self.len = 0;
738     }
739 
740     /// Returns the index of the next descriptor in the chain if the `NEXT` flag is set, or `None`
741     /// if it is not (and thus this descriptor is the end of the chain).
742     fn next(&self) -> Option<u16> {
743         if self.flags.contains(DescFlags::NEXT) {
744             Some(self.next)
745         } else {
746             None
747         }
748     }
749 }
750 
751 /// Descriptor flags
752 #[derive(AsBytes, Copy, Clone, Debug, Default, Eq, FromBytes, FromZeroes, PartialEq)]
753 #[repr(transparent)]
754 struct DescFlags(u16);
755 
756 bitflags! {
757     impl DescFlags: u16 {
758         const NEXT = 1;
759         const WRITE = 2;
760         const INDIRECT = 4;
761     }
762 }
763 
764 /// The driver uses the available ring to offer buffers to the device:
765 /// each ring entry refers to the head of a descriptor chain.
766 /// It is only written by the driver and read by the device.
767 #[repr(C)]
768 #[derive(Debug)]
769 struct AvailRing<const SIZE: usize> {
770     flags: AtomicU16,
771     /// A driver MUST NOT decrement the idx.
772     idx: AtomicU16,
773     ring: [u16; SIZE],
774     /// Only used if `VIRTIO_F_EVENT_IDX` is negotiated.
775     used_event: AtomicU16,
776 }
777 
778 /// The used ring is where the device returns buffers once it is done with them:
779 /// it is only written to by the device, and read by the driver.
780 #[repr(C)]
781 #[derive(Debug)]
782 struct UsedRing<const SIZE: usize> {
783     flags: AtomicU16,
784     idx: AtomicU16,
785     ring: [UsedElem; SIZE],
786     /// Only used if `VIRTIO_F_EVENT_IDX` is negotiated.
787     avail_event: AtomicU16,
788 }
789 
790 #[repr(C)]
791 #[derive(Debug)]
792 struct UsedElem {
793     id: u32,
794     len: u32,
795 }
796 
797 struct InputOutputIter<'a, 'b> {
798     inputs: &'a [&'b [u8]],
799     outputs: &'a mut [&'b mut [u8]],
800 }
801 
802 impl<'a, 'b> InputOutputIter<'a, 'b> {
803     fn new(inputs: &'a [&'b [u8]], outputs: &'a mut [&'b mut [u8]]) -> Self {
804         Self { inputs, outputs }
805     }
806 }
807 
808 impl<'a, 'b> Iterator for InputOutputIter<'a, 'b> {
809     type Item = (NonNull<[u8]>, BufferDirection);
810 
811     fn next(&mut self) -> Option<Self::Item> {
812         if let Some(input) = take_first(&mut self.inputs) {
813             Some(((*input).into(), BufferDirection::DriverToDevice))
814         } else {
815             let output = take_first_mut(&mut self.outputs)?;
816             Some(((*output).into(), BufferDirection::DeviceToDriver))
817         }
818     }
819 }
820 
821 // TODO: Use `slice::take_first` once it is stable
822 // (https://github.com/rust-lang/rust/issues/62280).
823 fn take_first<'a, T>(slice: &mut &'a [T]) -> Option<&'a T> {
824     let (first, rem) = slice.split_first()?;
825     *slice = rem;
826     Some(first)
827 }
828 
829 // TODO: Use `slice::take_first_mut` once it is stable
830 // (https://github.com/rust-lang/rust/issues/62280).
831 fn take_first_mut<'a, T>(slice: &mut &'a mut [T]) -> Option<&'a mut T> {
832     let (first, rem) = take(slice).split_first_mut()?;
833     *slice = rem;
834     Some(first)
835 }
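// A small usage sketch of the helpers above (illustrative only): taking the first element
// shrinks the slice view in place, mirroring the unstable `slice::take_first`.
//
//     let mut s: &[u8] = &[10, 20, 30];
//     assert_eq!(take_first(&mut s), Some(&10));
//     assert_eq!(s, &[20, 30][..]);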
836 
837 /// Simulates the device reading from a VirtIO queue and writing a response back, for use in tests.
838 ///
839 /// The fake device always uses descriptors in order.
840 #[cfg(test)]
841 pub(crate) fn fake_read_write_queue<const QUEUE_SIZE: usize>(
842     descriptors: *const [Descriptor; QUEUE_SIZE],
843     queue_driver_area: *const u8,
844     queue_device_area: *mut u8,
845     handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
846 ) {
847     use core::{ops::Deref, slice};
848 
849     let available_ring = queue_driver_area as *const AvailRing<QUEUE_SIZE>;
850     let used_ring = queue_device_area as *mut UsedRing<QUEUE_SIZE>;
851 
852     // Safe because the various pointers are properly aligned, dereferenceable, initialised, and
853     // nothing else accesses them during this block.
854     unsafe {
855         // Make sure there is actually at least one descriptor available to read from.
856         assert_ne!(
857             (*available_ring).idx.load(Ordering::Acquire),
858             (*used_ring).idx.load(Ordering::Acquire)
859         );
860         // The fake device always uses descriptors in order, like VIRTIO_F_IN_ORDER, so
861         // `used_ring.idx` marks the next descriptor we should take from the available ring.
862         let next_slot = (*used_ring).idx.load(Ordering::Acquire) & (QUEUE_SIZE as u16 - 1);
863         let head_descriptor_index = (*available_ring).ring[next_slot as usize];
864         let mut descriptor = &(*descriptors)[head_descriptor_index as usize];
865 
866         let input_length;
867         let output;
868         if descriptor.flags.contains(DescFlags::INDIRECT) {
869             // The descriptor shouldn't have any other flags if it is indirect.
870             assert_eq!(descriptor.flags, DescFlags::INDIRECT);
871 
872             // Loop through all input descriptors in the indirect descriptor list, reading data from
873             // them.
874             let indirect_descriptor_list: &[Descriptor] = zerocopy::Ref::new_slice(
875                 slice::from_raw_parts(descriptor.addr as *const u8, descriptor.len as usize),
876             )
877             .unwrap()
878             .into_slice();
879             let mut input = Vec::new();
880             let mut indirect_descriptor_index = 0;
881             while indirect_descriptor_index < indirect_descriptor_list.len() {
882                 let indirect_descriptor = &indirect_descriptor_list[indirect_descriptor_index];
883                 if indirect_descriptor.flags.contains(DescFlags::WRITE) {
884                     break;
885                 }
886 
887                 input.extend_from_slice(slice::from_raw_parts(
888                     indirect_descriptor.addr as *const u8,
889                     indirect_descriptor.len as usize,
890                 ));
891 
892                 indirect_descriptor_index += 1;
893             }
894             input_length = input.len();
895 
896             // Let the test handle the request.
897             output = handler(input);
898 
899             // Write the response to the remaining descriptors.
900             let mut remaining_output = output.deref();
901             while indirect_descriptor_index < indirect_descriptor_list.len() {
902                 let indirect_descriptor = &indirect_descriptor_list[indirect_descriptor_index];
903                 assert!(indirect_descriptor.flags.contains(DescFlags::WRITE));
904 
905                 let length_to_write = min(remaining_output.len(), indirect_descriptor.len as usize);
906                 ptr::copy(
907                     remaining_output.as_ptr(),
908                     indirect_descriptor.addr as *mut u8,
909                     length_to_write,
910                 );
911                 remaining_output = &remaining_output[length_to_write..];
912 
913                 indirect_descriptor_index += 1;
914             }
915             assert_eq!(remaining_output.len(), 0);
916         } else {
917             // Loop through all input descriptors in the chain, reading data from them.
918             let mut input = Vec::new();
919             while !descriptor.flags.contains(DescFlags::WRITE) {
920                 input.extend_from_slice(slice::from_raw_parts(
921                     descriptor.addr as *const u8,
922                     descriptor.len as usize,
923                 ));
924 
925                 if let Some(next) = descriptor.next() {
926                     descriptor = &(*descriptors)[next as usize];
927                 } else {
928                     break;
929                 }
930             }
931             input_length = input.len();
932 
933             // Let the test handle the request.
934             output = handler(input);
935 
936             // Write the response to the remaining descriptors.
937             let mut remaining_output = output.deref();
938             if descriptor.flags.contains(DescFlags::WRITE) {
939                 loop {
940                     assert!(descriptor.flags.contains(DescFlags::WRITE));
941 
942                     let length_to_write = min(remaining_output.len(), descriptor.len as usize);
943                     ptr::copy(
944                         remaining_output.as_ptr(),
945                         descriptor.addr as *mut u8,
946                         length_to_write,
947                     );
948                     remaining_output = &remaining_output[length_to_write..];
949 
950                     if let Some(next) = descriptor.next() {
951                         descriptor = &(*descriptors)[next as usize];
952                     } else {
953                         break;
954                     }
955                 }
956             }
957             assert_eq!(remaining_output.len(), 0);
958         }
959 
960         // Mark the buffer as used.
961         (*used_ring).ring[next_slot as usize].id = head_descriptor_index.into();
962         (*used_ring).ring[next_slot as usize].len = (input_length + output.len()) as u32;
963         (*used_ring).idx.fetch_add(1, Ordering::AcqRel);
964     }
965 }
966 
967 #[cfg(test)]
968 mod tests {
969     use super::*;
970     use crate::{
971         device::common::Feature,
972         hal::fake::FakeHal,
973         transport::{
974             fake::{FakeTransport, QueueStatus, State},
975             mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
976             DeviceType,
977         },
978     };
979     use core::ptr::NonNull;
980     use std::sync::{Arc, Mutex};
981 
982     #[test]
983     fn queue_too_big() {
984         let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
985         let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
986         assert_eq!(
987             VirtQueue::<FakeHal, 8>::new(&mut transport, 0, false, false).unwrap_err(),
988             Error::InvalidParam
989         );
990     }
991 
992     #[test]
993     fn queue_already_used() {
994         let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
995         let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
996         VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();
997         assert_eq!(
998             VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap_err(),
999             Error::AlreadyUsed
1000         );
1001     }
1002 
1003     #[test]
1004     fn add_empty() {
1005         let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
1006         let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
1007         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();
1008         assert_eq!(
1009             unsafe { queue.add(&[], &mut []) }.unwrap_err(),
1010             Error::InvalidParam
1011         );
1012     }
1013 
1014     #[test]
1015     fn add_too_many() {
1016         let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
1017         let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
1018         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();
1019         assert_eq!(queue.available_desc(), 4);
1020         assert_eq!(
1021             unsafe { queue.add(&[&[], &[], &[]], &mut [&mut [], &mut []]) }.unwrap_err(),
1022             Error::QueueFull
1023         );
1024     }
1025 
1026     #[test]
1027     fn add_buffers() {
1028         let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
1029         let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
1030         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();
1031         assert_eq!(queue.available_desc(), 4);
1032 
1033         // Add a buffer chain consisting of two device-readable parts followed by two
1034         // device-writable parts.
1035         let token = unsafe { queue.add(&[&[1, 2], &[3]], &mut [&mut [0, 0], &mut [0]]) }.unwrap();
1036 
1037         assert_eq!(queue.available_desc(), 0);
1038         assert!(!queue.can_pop());
1039 
1040         // Safe because the various parts of the queue are properly aligned, dereferenceable and
1041         // initialised, and nothing else is accessing them at the same time.
1042         unsafe {
1043             let first_descriptor_index = (*queue.avail.as_ptr()).ring[0];
1044             assert_eq!(first_descriptor_index, token);
1045             assert_eq!(
1046                 (*queue.desc.as_ptr())[first_descriptor_index as usize].len,
1047                 2
1048             );
1049             assert_eq!(
1050                 (*queue.desc.as_ptr())[first_descriptor_index as usize].flags,
1051                 DescFlags::NEXT
1052             );
1053             let second_descriptor_index =
1054                 (*queue.desc.as_ptr())[first_descriptor_index as usize].next;
1055             assert_eq!(
1056                 (*queue.desc.as_ptr())[second_descriptor_index as usize].len,
1057                 1
1058             );
1059             assert_eq!(
1060                 (*queue.desc.as_ptr())[second_descriptor_index as usize].flags,
1061                 DescFlags::NEXT
1062             );
1063             let third_descriptor_index =
1064                 (*queue.desc.as_ptr())[second_descriptor_index as usize].next;
1065             assert_eq!(
1066                 (*queue.desc.as_ptr())[third_descriptor_index as usize].len,
1067                 2
1068             );
1069             assert_eq!(
1070                 (*queue.desc.as_ptr())[third_descriptor_index as usize].flags,
1071                 DescFlags::NEXT | DescFlags::WRITE
1072             );
1073             let fourth_descriptor_index =
1074                 (*queue.desc.as_ptr())[third_descriptor_index as usize].next;
1075             assert_eq!(
1076                 (*queue.desc.as_ptr())[fourth_descriptor_index as usize].len,
1077                 1
1078             );
1079             assert_eq!(
1080                 (*queue.desc.as_ptr())[fourth_descriptor_index as usize].flags,
1081                 DescFlags::WRITE
1082             );
1083         }
1084     }
1085 
1086     #[cfg(feature = "alloc")]
1087     #[test]
1088     fn add_buffers_indirect() {
1089         use core::ptr::slice_from_raw_parts;
1090 
1091         let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
1092         let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
1093         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, true, false).unwrap();
1094         assert_eq!(queue.available_desc(), 4);
1095 
1096         // Add a buffer chain consisting of two device-readable parts followed by two
1097         // device-writable parts.
1098         let token = unsafe { queue.add(&[&[1, 2], &[3]], &mut [&mut [0, 0], &mut [0]]) }.unwrap();
1099 
1100         assert_eq!(queue.available_desc(), 4);
1101         assert!(!queue.can_pop());
1102 
1103         // Safe because the various parts of the queue are properly aligned, dereferenceable and
1104         // initialised, and nothing else is accessing them at the same time.
1105         unsafe {
1106             let indirect_descriptor_index = (*queue.avail.as_ptr()).ring[0];
1107             assert_eq!(indirect_descriptor_index, token);
1108             assert_eq!(
1109                 (*queue.desc.as_ptr())[indirect_descriptor_index as usize].len as usize,
1110                 4 * size_of::<Descriptor>()
1111             );
1112             assert_eq!(
1113                 (*queue.desc.as_ptr())[indirect_descriptor_index as usize].flags,
1114                 DescFlags::INDIRECT
1115             );
1116 
1117             let indirect_descriptors = slice_from_raw_parts(
1118                 (*queue.desc.as_ptr())[indirect_descriptor_index as usize].addr
1119                     as *const Descriptor,
1120                 4,
1121             );
1122             assert_eq!((*indirect_descriptors)[0].len, 2);
1123             assert_eq!((*indirect_descriptors)[0].flags, DescFlags::NEXT);
1124             assert_eq!((*indirect_descriptors)[0].next, 1);
1125             assert_eq!((*indirect_descriptors)[1].len, 1);
1126             assert_eq!((*indirect_descriptors)[1].flags, DescFlags::NEXT);
1127             assert_eq!((*indirect_descriptors)[1].next, 2);
1128             assert_eq!((*indirect_descriptors)[2].len, 2);
1129             assert_eq!(
1130                 (*indirect_descriptors)[2].flags,
1131                 DescFlags::NEXT | DescFlags::WRITE
1132             );
1133             assert_eq!((*indirect_descriptors)[2].next, 3);
1134             assert_eq!((*indirect_descriptors)[3].len, 1);
1135             assert_eq!((*indirect_descriptors)[3].flags, DescFlags::WRITE);
1136         }
1137     }
1138 
1139     /// Tests that the queue advises the device that notifications are needed.
1140     #[test]
1141     fn set_dev_notify() {
1142         let mut config_space = ();
1143         let state = Arc::new(Mutex::new(State {
1144             queues: vec![QueueStatus::default()],
1145             ..Default::default()
1146         }));
1147         let mut transport = FakeTransport {
1148             device_type: DeviceType::Block,
1149             max_queue_size: 4,
1150             device_features: 0,
1151             config_space: NonNull::from(&mut config_space),
1152             state: state.clone(),
1153         };
1154         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();
1155 
1156         // Check that the avail ring's flag is zero by default.
1157         assert_eq!(
1158             unsafe { (*queue.avail.as_ptr()).flags.load(Ordering::Acquire) },
1159             0x0
1160         );
1161 
1162         queue.set_dev_notify(false);
1163 
1164         // Check that the avail ring's flag is 1 after `set_dev_notify(false)`.
1165         assert_eq!(
1166             unsafe { (*queue.avail.as_ptr()).flags.load(Ordering::Acquire) },
1167             0x1
1168         );
1169 
1170         queue.set_dev_notify(true);
1171 
1172         // Check that the avail ring's flag is 0 after `set_dev_notify(true)`.
1173         assert_eq!(
1174             unsafe { (*queue.avail.as_ptr()).flags.load(Ordering::Acquire) },
1175             0x0
1176         );
1177     }
1178 
1179     /// Tests that the queue notifies the device about added buffers, if it hasn't suppressed
1180     /// notifications.
1181     #[test]
1182     fn add_notify() {
1183         let mut config_space = ();
1184         let state = Arc::new(Mutex::new(State {
1185             queues: vec![QueueStatus::default()],
1186             ..Default::default()
1187         }));
1188         let mut transport = FakeTransport {
1189             device_type: DeviceType::Block,
1190             max_queue_size: 4,
1191             device_features: 0,
1192             config_space: NonNull::from(&mut config_space),
1193             state: state.clone(),
1194         };
1195         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, false).unwrap();
1196 
1197         // Add a buffer chain with a single device-readable part.
1198         unsafe { queue.add(&[&[42]], &mut []) }.unwrap();
1199 
1200         // Check that the transport would be notified.
1201         assert_eq!(queue.should_notify(), true);
1202 
1203         // SAFETY: the various parts of the queue are properly aligned, dereferenceable and
1204         // initialised, and nothing else is accessing them at the same time.
1205         unsafe {
1206             // Suppress notifications.
1207             (*queue.used.as_ptr()).flags.store(0x01, Ordering::Release);
1208         }
1209 
1210         // Check that the transport would not be notified.
1211         assert_eq!(queue.should_notify(), false);
1212     }
1213 
1214     /// Tests that the queue notifies the device about added buffers, if it hasn't suppressed
1215     /// notifications with the `avail_event` index.
1216     #[test]
1217     fn add_notify_event_idx() {
1218         let mut config_space = ();
1219         let state = Arc::new(Mutex::new(State {
1220             queues: vec![QueueStatus::default()],
1221             ..Default::default()
1222         }));
1223         let mut transport = FakeTransport {
1224             device_type: DeviceType::Block,
1225             max_queue_size: 4,
1226             device_features: Feature::RING_EVENT_IDX.bits(),
1227             config_space: NonNull::from(&mut config_space),
1228             state: state.clone(),
1229         };
1230         let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0, false, true).unwrap();
1231 
1232         // Add a buffer chain with a single device-readable part.
1233         assert_eq!(unsafe { queue.add(&[&[42]], &mut []) }.unwrap(), 0);
1234 
1235         // Check that the transport would be notified.
1236         assert_eq!(queue.should_notify(), true);
1237 
1238         // SAFETY: the various parts of the queue are properly aligned, dereferenceable and
1239         // initialised, and nothing else is accessing them at the same time.
1240         unsafe {
1241             // Suppress notifications.
1242             (*queue.used.as_ptr())
1243                 .avail_event
1244                 .store(1, Ordering::Release);
1245         }
1246 
1247         // Check that the transport would not be notified.
1248         assert_eq!(queue.should_notify(), false);
1249 
1250         // Add another buffer chain.
1251         assert_eq!(unsafe { queue.add(&[&[42]], &mut []) }.unwrap(), 1);
1252 
1253         // Check that the transport should be notified again now.
1254         assert_eq!(queue.should_notify(), true);
1255     }
1256 }
1257