// Portions Copyright 2019 Red Hat, Inc.
//
// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE-BSD-3-Clause file.
//
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Defines the `ByteValued` trait, which marks types that are safe to instantiate from
//! arbitrary raw data.

use std::io::{Read, Write};
use std::mem::size_of;
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;

use crate::atomic_integer::AtomicInteger;
use crate::volatile_memory::VolatileSlice;

/// Types for which it is safe to initialize from raw data.
///
/// # Safety
///
/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a
/// byte array.  This is generally true for all plain-old-data structs.  It is notably not true for
/// any type that includes a reference. It is generally also not safe for non-packed structs, as
/// compiler-inserted padding is considered uninitialized memory, and thus reading/writing it
/// causes undefined behavior.
///
/// Implementing this trait guarantees that it is safe to instantiate the struct with random data.
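///
/// # Example
///
/// A minimal sketch of a conforming implementation: a `#[repr(C)]` struct with no padding, whose
/// fields are themselves `ByteValued` (the `Packet` type below is hypothetical, for illustration;
/// the example assumes the crate-root re-export `vm_memory::ByteValued`):
///
/// ```
/// use vm_memory::ByteValued;
///
/// #[repr(C)]
/// #[derive(Copy, Clone, Default)]
/// struct Packet {
///     id: u32,
///     len: u32,
/// }
///
/// // SAFETY: `Packet` is `#[repr(C)]` with two `u32` fields and no padding, so every
/// // byte pattern of the right length is a valid `Packet`.
/// unsafe impl ByteValued for Packet {}
/// ```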
pub unsafe trait ByteValued: Copy + Default + Send + Sync {
    /// Converts a slice of raw data into a reference of `Self`.
    ///
    /// The value of `data` is not copied. Instead a reference is made from the given slice. The
    /// value of `Self` will depend on the representation of the type in memory, and may change in
    /// an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
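    ///
    /// # Example
    ///
    /// A minimal sketch for a primitive type (assuming the crate-root re-export
    /// `vm_memory::ByteValued`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let val = 42u32;
    /// // `as_slice` yields bytes with the size and alignment of a `u32`.
    /// let bytes = val.as_slice();
    /// assert_eq!(u32::from_slice(bytes), Some(&42u32));
    /// // A slice of the wrong length is rejected.
    /// assert_eq!(u32::from_slice(&bytes[..2]), None);
    /// ```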
    fn from_slice(data: &[u8]) -> Option<&Self> {
        // Early out to avoid an unneeded `align_to` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to` method
        // ensures that we don't have any unaligned references. This aliases a pointer, but because
        // the pointer is from a const slice reference, there are no mutable aliases. Finally, the
        // reference returned can not outlive data because they have equal implicit lifetime
        // constraints.
        match unsafe { data.align_to::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a mutable slice of raw data into a mutable reference of `Self`.
    ///
    /// Because `Self` is made from a reference to the mutable slice, mutations to the returned
    /// reference are immediately reflected in `data`. The value of the returned `Self` will depend
    /// on the representation of the type in memory, and may change in an unstable fashion.
    ///
    /// This will return `None` if the length of data does not match the size of `Self`, or if the
    /// data is not aligned for the type of `Self`.
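    ///
    /// # Example
    ///
    /// A minimal sketch: mutating through the returned reference is reflected in the underlying
    /// bytes (assuming the crate-root re-export `vm_memory::ByteValued`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut val = 0u32;
    /// // Viewing the value as bytes guarantees the correct size and alignment.
    /// let bytes = val.as_mut_slice();
    /// *u32::from_mut_slice(bytes).unwrap() = 42;
    /// assert_eq!(val, 42);
    /// ```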
    fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> {
        // Early out to avoid an unneeded `align_to_mut` call.
        if data.len() != size_of::<Self>() {
            return None;
        }

        // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and
        // we ensured the size of the pointer's buffer is the correct size. The `align_to_mut`
        // method ensures that we don't have any unaligned references. This aliases a pointer, but
        // because the pointer is from a mut slice reference, we borrow the passed in mutable
        // reference. Finally, the reference returned can not outlive data because they have equal
        // implicit lifetime constraints.
        match unsafe { data.align_to_mut::<Self>() } {
            ([], [mid], []) => Some(mid),
            _ => None,
        }
    }

    /// Converts a reference to `self` into a slice of bytes.
    ///
    /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`.
    /// The value of bytes in the returned slice will depend on the representation of the type in
    /// memory, and may change in an unstable fashion.
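    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the crate-root re-export `vm_memory::ByteValued`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let val = 0x0102_0304u32;
    /// // Four bytes, in the machine's native byte order.
    /// assert_eq!(val.as_slice(), &val.to_ne_bytes()[..]);
    /// ```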
    fn as_slice(&self) -> &[u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The lifetime of the returned slice is the same as the passed reference,
        // so that no dangling pointers will result from this pointer alias.
        unsafe { from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a mutable slice of bytes.
    ///
    /// Because the slice is made from a reference to `self`, mutations to the returned slice are
    /// immediately reflected in `self`. The value of bytes in the returned slice will depend on
    /// the representation of the type in memory, and may change in an unstable fashion.
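    ///
    /// # Example
    ///
    /// A minimal sketch: writing through the byte view updates the value (assuming the crate-root
    /// re-export `vm_memory::ByteValued`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut val = 0u32;
    /// val.as_mut_slice().fill(0xff);
    /// assert_eq!(val, u32::MAX);
    /// ```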
    fn as_mut_slice(&mut self) -> &mut [u8] {
        // SAFETY: Safe because the entire size of self is accessible as bytes because the trait
        // guarantees it. The trait also guarantees that any combination of bytes is valid for this
        // type, so modifying them in the form of a byte slice is valid. The lifetime of the
        // returned slice is the same as the passed reference, so that no dangling pointers will
        // result from this pointer alias. Although this does alias a mutable pointer, we do so by
        // exclusively borrowing the given mutable reference.
        unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::<Self>()) }
    }

    /// Converts a mutable reference to `self` into a `VolatileSlice`.  This is
    /// useful because `VolatileSlice` provides a `Bytes<usize>` implementation.
    ///
    /// # Safety
    ///
    /// Unlike most `VolatileMemory` implementations, this method requires an exclusive
    /// reference to `self`; this trivially fulfills `VolatileSlice::new`'s requirement
    /// that all accesses to `self` use volatile accesses (because there can
    /// be no other accesses).
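    ///
    /// # Example
    ///
    /// A minimal sketch mirroring this module's own tests: copying raw bytes into a value through
    /// the `VolatileSlice` view (assuming the crate-root re-export `vm_memory::ByteValued`):
    ///
    /// ```
    /// use vm_memory::ByteValued;
    ///
    /// let mut val = 0u32;
    /// let src = [0x01u8; 4];
    /// // `copy_from` performs volatile writes into the value behind the slice.
    /// val.as_bytes().copy_from(&src);
    /// assert_eq!(val, 0x0101_0101);
    /// ```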
    fn as_bytes(&mut self) -> VolatileSlice {
        // SAFETY: This is safe because the lifetime is the same as self
        unsafe { VolatileSlice::new(self as *mut Self as *mut _, size_of::<Self>()) }
    }
}

macro_rules! byte_valued_array {
    ($T:ty, $($N:expr)+) => {
        $(
            // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued.
            // They are just numbers.
            unsafe impl ByteValued for [$T; $N] {}
        )+
    }
}

macro_rules! byte_valued_type {
    ($T:ty) => {
        // SAFETY: Safe as long as `$T` is POD (plain old data).
        // We use this macro to generate the implementations for the integer types below.
        unsafe impl ByteValued for $T {}
        byte_valued_array! {
            $T,
            0  1  2  3  4  5  6  7  8  9
            10 11 12 13 14 15 16 17 18 19
            20 21 22 23 24 25 26 27 28 29
            30 31 32
        }
    };
}

byte_valued_type!(u8);
byte_valued_type!(u16);
byte_valued_type!(u32);
byte_valued_type!(u64);
byte_valued_type!(u128);
byte_valued_type!(usize);
byte_valued_type!(i8);
byte_valued_type!(i16);
byte_valued_type!(i32);
byte_valued_type!(i64);
byte_valued_type!(i128);
byte_valued_type!(isize);

/// A trait used to identify types which can be accessed atomically by proxy.
pub trait AtomicAccess:
    ByteValued
    // Could not find a more succinct way of stating that `Self` can be converted
    // into `Self::A::V`, and the other way around.
    + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
    + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
{
    /// The `AtomicInteger` that atomic operations on `Self` are based on.
    type A: AtomicInteger;
}

macro_rules! impl_atomic_access {
    ($T:ty, $A:path) => {
        impl AtomicAccess for $T {
            type A = $A;
        }
    };
}

impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x"
))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);

impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
#[cfg(any(
    target_arch = "x86_64",
    target_arch = "aarch64",
    target_arch = "powerpc64",
    target_arch = "s390x"
))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);

impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);

/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
/// - anonymous memory areas
/// - mmapped memory areas
/// - data files
/// - a proxy to access memory on a remote host
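///
/// # Example
///
/// A compile-checked sketch of the object API, generic over any implementor (the `roundtrip`
/// helper is hypothetical, for illustration only; it assumes the crate-root re-export
/// `vm_memory::Bytes`):
///
/// ```
/// use vm_memory::Bytes;
///
/// // Writes a `u64` into any byte container and reads it back.
/// fn roundtrip<M: Bytes<usize>>(mem: &M)
/// where
///     M::E: std::fmt::Debug,
/// {
///     mem.write_obj(0xdead_beef_u64, 0).unwrap();
///     assert_eq!(mem.read_obj::<u64>(0).unwrap(), 0xdead_beef_u64);
/// }
/// ```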
pub trait Bytes<A> {
    /// Associated error codes
    type E;

    /// Writes a slice into the container at `addr`.
    ///
    /// Returns the number of bytes written. The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// container.
    fn write(&self, buf: &[u8], addr: A) -> Result<usize, Self::E>;

    /// Reads data from the container at `addr` into a slice.
    ///
    /// Returns the number of bytes read. The number of bytes read can be less than the length
    /// of the slice if there isn't enough data within the container.
    fn read(&self, buf: &mut [u8], addr: A) -> Result<usize, Self::E>;

    /// Writes the entire content of a slice into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough space within the container to write the entire slice.
    /// Part of the data may have been copied nevertheless.
    fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>;

    /// Reads data from the container at `addr` to fill an entire slice.
    ///
    /// # Errors
    ///
    /// Returns an error if there isn't enough data within the container to fill the entire slice.
    /// Part of the data may have been copied nevertheless.
    fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>;

    /// Writes an object into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if the object doesn't fit inside the container.
    fn write_obj<T: ByteValued>(&self, val: T, addr: A) -> Result<(), Self::E> {
        self.write_slice(val.as_slice(), addr)
    }

    /// Reads an object from the container at `addr`.
    ///
    /// Reading from a volatile area isn't strictly safe as it could change mid-read.
    /// However, as long as the type T is plain old data and can handle random initialization,
    /// everything will be OK.
    ///
    /// # Errors
    ///
    /// Returns an error if there's not enough data inside the container.
    fn read_obj<T: ByteValued>(&self, addr: A) -> Result<T, Self::E> {
        let mut result: T = Default::default();
        self.read_slice(result.as_mut_slice(), addr).map(|_| result)
    }

    /// Reads up to `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// Returns the number of bytes written into the container.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy `count` bytes from `src` into the container.
    fn read_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Read;

    /// Reads exactly `count` bytes from an object and writes them into the container at `addr`.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes could not be copied from `src` to the container.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin writing at this address.
    /// * `src` - Copy from `src` into the container.
    /// * `count` - Copy exactly `count` bytes from `src` into the container.
    fn read_exact_from<F>(&self, addr: A, src: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Read;

    /// Reads up to `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// Returns the number of bytes written into the object.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy `count` bytes from the container to `dst`.
    fn write_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<usize, Self::E>
    where
        F: Write;

    /// Reads exactly `count` bytes from the container at `addr` and writes them into an object.
    ///
    /// # Errors
    ///
    /// Returns an error if `count` bytes could not be copied from the container to `dst`.
    /// Part of the data may have been copied nevertheless.
    ///
    /// # Arguments
    /// * `addr` - Begin reading from this address.
    /// * `dst` - Copy from the container to `dst`.
    /// * `count` - Copy exactly `count` bytes from the container to `dst`.
    fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Write;

    /// Atomically store a value at the specified address.
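    ///
    /// # Example
    ///
    /// A compile-checked sketch, generic over any implementor (the `store_flag` helper is
    /// hypothetical, for illustration only; it assumes the crate-root re-export
    /// `vm_memory::Bytes`):
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use vm_memory::Bytes;
    ///
    /// // Atomically publishes a flag value and reads it back.
    /// fn store_flag<M: Bytes<usize>>(mem: &M)
    /// where
    ///     M::E: std::fmt::Debug,
    /// {
    ///     mem.store(1u32, 0, Ordering::Relaxed).unwrap();
    ///     assert_eq!(mem.load::<u32>(0, Ordering::Relaxed).unwrap(), 1);
    /// }
    /// ```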
    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;

    /// Atomically load a value from the specified address.
    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
}

#[cfg(test)]
pub(crate) mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    use super::*;

    use std::cell::RefCell;
    use std::fmt::Debug;
    use std::mem::align_of;

    // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be
    // zero-initialized.
    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
    where
        A: Copy,
        B: Bytes<A>,
        B::E: Debug,
    {
        let val = 100u32;

        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
        b.store(val, addr, Ordering::Relaxed).unwrap();
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);

        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
    }

    fn check_byte_valued_type<T>()
    where
        T: ByteValued + PartialEq + Debug + Default,
    {
        let mut data = [0u8; 48];
        let pre_len = {
            let (pre, _, _) = unsafe { data.align_to::<T>() };
            pre.len()
        };
        {
            let aligned_data = &mut data[pre_len..pre_len + size_of::<T>()];
            {
                let mut val: T = Default::default();
                assert_eq!(T::from_slice(aligned_data), Some(&val));
                assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val));
                assert_eq!(val.as_slice(), aligned_data);
                assert_eq!(val.as_mut_slice(), aligned_data);
            }
        }
        for i in 1..size_of::<T>().min(align_of::<T>()) {
            let begin = pre_len + i;
            let end = begin + size_of::<T>();
            let unaligned_data = &mut data[begin..end];
            {
                if align_of::<T>() != 1 {
                    assert_eq!(T::from_slice(unaligned_data), None);
                    assert_eq!(T::from_mut_slice(unaligned_data), None);
                }
            }
        }
        // Check the early out condition
        {
            assert!(T::from_slice(&data).is_none());
            assert!(T::from_mut_slice(&mut data).is_none());
        }
    }

    #[test]
    fn test_byte_valued() {
        check_byte_valued_type::<u8>();
        check_byte_valued_type::<u16>();
        check_byte_valued_type::<u32>();
        check_byte_valued_type::<u64>();
        check_byte_valued_type::<u128>();
        check_byte_valued_type::<usize>();
        check_byte_valued_type::<i8>();
        check_byte_valued_type::<i16>();
        check_byte_valued_type::<i32>();
        check_byte_valued_type::<i64>();
        check_byte_valued_type::<i128>();
        check_byte_valued_type::<isize>();
    }

    pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10;

    pub struct MockBytesContainer {
        container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>,
    }

    impl MockBytesContainer {
        pub fn new() -> Self {
            MockBytesContainer {
                container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]),
            }
        }

        pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> {
            // Reject accesses that do not fit entirely within the container. Checking
            // `buf.len()` first keeps the subtraction from underflowing for oversized buffers.
            if buf.len() > MOCK_BYTES_CONTAINER_SIZE || addr > MOCK_BYTES_CONTAINER_SIZE - buf.len()
            {
                return Err(());
            }

            Ok(())
        }
    }

    impl Bytes<usize> for MockBytesContainer {
        type E = ();

        fn write(&self, _: &[u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn read(&self, _: &mut [u8], _: usize) -> Result<usize, Self::E> {
            unimplemented!()
        }

        fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            let mut container = self.container.borrow_mut();
            container[addr..addr + buf.len()].copy_from_slice(buf);

            Ok(())
        }

        fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> {
            self.validate_slice_op(buf, addr)?;

            let container = self.container.borrow();
            buf.copy_from_slice(&container[addr..addr + buf.len()]);

            Ok(())
        }

        fn read_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
        where
            F: Read,
        {
            unimplemented!()
        }

        fn read_exact_from<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
        where
            F: Read,
        {
            unimplemented!()
        }

        fn write_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<usize, Self::E>
        where
            F: Write,
        {
            unimplemented!()
        }

        fn write_all_to<F>(&self, _: usize, _: &mut F, _: usize) -> Result<(), Self::E>
        where
            F: Write,
        {
            unimplemented!()
        }

        fn store<T: AtomicAccess>(
            &self,
            _val: T,
            _addr: usize,
            _order: Ordering,
        ) -> Result<(), Self::E> {
            unimplemented!()
        }

        fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
            unimplemented!()
        }
    }

    #[test]
    fn test_bytes() {
        let bytes = MockBytesContainer::new();

        assert!(bytes.write_obj(u64::MAX, 0).is_ok());
        assert_eq!(bytes.read_obj::<u64>(0).unwrap(), u64::MAX);

        assert!(bytes
            .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE)
            .is_err());
        assert!(bytes.read_obj::<u64>(MOCK_BYTES_CONTAINER_SIZE).is_err());
    }

    #[repr(C)]
    #[derive(Copy, Clone, Default)]
    struct S {
        a: u32,
        b: u32,
    }

    unsafe impl ByteValued for S {}

    #[test]
    fn byte_valued_slice() {
        let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1];
        let mut s: S = Default::default();
        s.as_bytes().copy_from(&a);
        assert_eq!(s.a, 0);
        assert_eq!(s.b, 0x0101_0101);
    }
}