diff options
author | Carl Lerche <me@carllerche.com> | 2019-11-05 19:12:30 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2019-11-05 19:12:30 -0800 |
commit | d5c1119c881c9a8b511aa9000fd26b9bda014256 (patch) | |
tree | 72e2ca6b655f29e948a91ba4573a95350cb241e0 /tokio/src/util | |
parent | a6253ed05a1e0d14bc64915f5937c29092df9497 (diff) |
runtime: combine `executor` and `runtime` mods (#1734)
Now, all types are under `runtime`. `executor::util` is moved to a top-level
`util` module.
Diffstat (limited to 'tokio/src/util')
-rw-r--r-- | tokio/src/util/mod.rs | 5 | ||||
-rw-r--r-- | tokio/src/util/pad.rs | 52 | ||||
-rw-r--r-- | tokio/src/util/rand.rs | 52 |
3 files changed, 109 insertions, 0 deletions
// tokio/src/util/mod.rs declares and re-exports the contents of this module:
//   mod pad;  pub(crate) use self::pad::CachePadded;
//   mod rand; pub(crate) use self::rand::FastRand;

// tokio/src/util/pad.rs

use core::fmt;
use core::ops::{Deref, DerefMut};

/// Pads and aligns a value to the size of a cache line.
///
/// Keeping a value on its own cache line prevents false sharing when
/// neighboring values are accessed concurrently from different cores.
#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls pairs of
// 64-byte cache lines at a time, so alignment must be 128 bytes rather than
// 64 on x86_64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
pub(crate) struct CachePadded<T> {
    value: T,
}

// The padding carries no state of its own, so the wrapper is exactly as
// thread-safe as the value it contains (these match the auto-derived bounds).
unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    /// Wraps `t` in a cache-line-aligned cell.
    pub(crate) fn new(t: T) -> CachePadded<T> {
        CachePadded { value: t }
    }
}

impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

// Reads and writes pass straight through to the wrapped value.
impl<T> Deref for CachePadded<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}
use std::cell::Cell;

/// Fast, non-cryptographic random number generator.
///
/// Implements xorshift64+: two 32-bit xorshift sequences added together.
/// The shift triplet [17,7,16] was calculated as indicated in Marsaglia's
/// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
/// This generator passes the SmallCrush suite, part of TestU01 framework:
/// http://simul.iro.umontreal.ca/testu01/tu01.html
#[derive(Debug)]
pub(crate) struct FastRand {
    one: Cell<u32>,
    two: Cell<u32>,
}

impl FastRand {
    /// Initialize a new, thread-local, fast random number generator.
    ///
    /// The two 32-bit state words are seeded from the halves of `seed`.
    pub(crate) fn new(seed: u64) -> FastRand {
        // The low word seeds `two`, which must never start at zero.
        let lo = match seed as u32 {
            0 => 1,
            nonzero => nonzero,
        };

        FastRand {
            one: Cell::new((seed >> 32) as u32),
            two: Cell::new(lo),
        }
    }

    /// Returns a pseudo-random value in `0..n` (0 when `n == 0`).
    pub(crate) fn fastrand_n(&self, n: u32) -> u32 {
        // Multiply-shift reduction: similar to `fastrand() % n`, but faster.
        // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
        // The u32×u32 product cannot overflow a u64, so a plain `*` suffices.
        ((u64::from(self.fastrand()) * u64::from(n)) >> 32) as u32
    }

    // Advances the generator and returns the next raw 32-bit value.
    fn fastrand(&self) -> u32 {
        let (mut x, y) = (self.one.get(), self.two.get());

        x ^= x << 17;
        x = x ^ y ^ (x >> 7) ^ (y >> 16);

        // Rotate the state: the old `two` becomes `one`.
        self.one.set(y);
        self.two.set(x);

        y.wrapping_add(x)
    }
}