use core::fmt;
use core::ops::{Deref, DerefMut};

/// Pads and aligns a value to the length of a cache line.
///
/// Wrapping a value in `CachePadded<T>` prevents *false sharing*: two values
/// that would otherwise share a cache line no longer invalidate each other's
/// line when written from different threads.
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
pub(crate) struct CachePadded<T> {
    value: T,
}

// SAFETY: `CachePadded<T>` only adds alignment padding around a single `T`;
// it is safe to send to / share between threads exactly when `T` is.
unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    /// Wraps `t`, padding and aligning it to the length of a cache line.
    pub(crate) fn new(t: T) -> CachePadded<T> {
        CachePadded { value: t }
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}

impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}