path: root/openpgp/src/crypto/mem.rs
author     Justus Winter <justus@sequoia-pgp.org>   2019-06-28 17:39:46 +0200
committer  Justus Winter <justus@sequoia-pgp.org>   2019-07-16 13:09:18 +0200
commit     96472b23e00a80905ebcefc4f2015e7d9b17059a (patch)
tree       38d3e935eb98091e30410d4af4de2a105fe3fbf1 /openpgp/src/crypto/mem.rs
parent     7f65f84743a0c73bff69bbd5a87013bf4fb8f8b1 (diff)
openpgp: Encrypt unencrypted secret keys in memory.
- Add a new type, crypto::mem::Encrypted, and use it to protect unencrypted secret keys while they are in memory.
- This type encrypts sensitive data, such as secret keys, in memory while they are unused, and decrypts them on demand. This protects against cross-protection-boundary readout via microarchitectural flaws like Spectre or Meltdown, via attacks on physical layout like Rowbleed, and even via coldboot attacks.
- The key insight is that these kinds of attacks are imperfect, i.e. the recovered data contains bitflips, or the attack only provides a probability for any given bit. Applied to cryptographic keys, these kinds of imperfect attacks are enough to recover the actual key.
- This implementation, on the other hand, derives a sealing key from a large area of memory, the "pre-key", using a key derivation function. Any single bitflip in the readout of the pre-key will avalanche through all the bits in the sealing key, rendering it unusable with no indication of where the error occurred.
- This kind of protection was pioneered by OpenSSH. The commit adding it is: https://marc.info/?l=openbsd-cvs&m=156109087822676
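For illustration, here is a minimal usage sketch of the new API as it appears in the diff below. It is a hypothetical in-crate caller, not part of the patch; the conversion from Vec<u8> into Protected via into() is taken from the existing module.

    // Hypothetical caller inside the crate (not part of this patch).
    fn example() {
        use crate::crypto::mem::{Encrypted, Protected};

        // Seal the secret while it is not in use.
        let secret: Protected = vec![1u8, 2, 3].into();
        let sealed = Encrypted::new(secret);

        // Decrypt on demand, only for the duration of the closure.
        let sum: u8 = sealed.map(|plaintext| plaintext.iter().sum());
        assert_eq!(sum, 6);
    }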
Diffstat (limited to 'openpgp/src/crypto/mem.rs')
-rw-r--r--   openpgp/src/crypto/mem.rs   128
1 file changed, 128 insertions, 0 deletions
diff --git a/openpgp/src/crypto/mem.rs b/openpgp/src/crypto/mem.rs
index f6bec6c0..aab9abee 100644
--- a/openpgp/src/crypto/mem.rs
+++ b/openpgp/src/crypto/mem.rs
@@ -89,6 +89,134 @@ impl fmt::Debug for Protected {
}
}
+/// Encrypted memory.
+///
+/// This type encrypts sensitive data, such as secret keys, in memory
+/// while they are unused, and decrypts them on demand. This protects
+/// against cross-protection-boundary readout via microarchitectural
+/// flaws like Spectre or Meltdown, via attacks on physical layout
+/// like Rowbleed, and even via coldboot attacks.
+///
+/// The key insight is that these kinds of attacks are imperfect,
+/// i.e. the recovered data contains bitflips, or the attack only
+/// provides a probability for any given bit. Applied to
+/// cryptographic keys, these kinds of imperfect attacks are enough to
+/// recover the actual key.
+///
+/// This implementation, on the other hand, derives a sealing key from
+/// a large area of memory, the "pre-key", using a key derivation
+/// function. Now, any single bitflip in the readout of the pre-key
+/// will avalanche through all the bits in the sealing key, rendering
+/// it unusable with no indication of where the error occurred.
+///
+/// This kind of protection was pioneered by OpenSSH. The commit
+/// adding it is: https://marc.info/?l=openbsd-cvs&m=156109087822676
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub struct Encrypted {
+    ciphertext: Protected,
+    iv: Protected,
+}
+
+/// The number of pages containing random bytes to derive the prekey
+/// from.
+const ENCRYPTED_MEMORY_PREKEY_PAGES: usize = 4;
+
+/// Page size.
+const ENCRYPTED_MEMORY_PAGE_SIZE: usize = 4096;
+
+/// This module contains the code that needs to access the prekey.
+///
+/// Code outside of it cannot access it, because `PREKEY` is private.
+mod has_access_to_prekey {
+    use std::io::{self, Cursor, Write};
+    use lazy_static;
+    use crate::constants::{AEADAlgorithm, HashAlgorithm, SymmetricAlgorithm};
+    use crate::crypto::{aead, SessionKey};
+    use super::*;
+
+    lazy_static::lazy_static! {
+        static ref PREKEY: Box<[Box<[u8]>]> = {
+            let mut pages = Vec::new();
+            for _ in 0..ENCRYPTED_MEMORY_PREKEY_PAGES {
+                let mut page = vec![0; ENCRYPTED_MEMORY_PAGE_SIZE];
+                crate::crypto::random(&mut page);
+                pages.push(page.into());
+            }
+            pages.into()
+        };
+    }
+
+    // Algorithms used for the memory encryption.
+    //
+    // The digest of the hash algorithm must be at least as large as
+    // the size of the key used by the symmetric algorithm. All
+    // algorithms MUST be supported by the cryptographic library.
+    const HASH_ALGO: HashAlgorithm = HashAlgorithm::SHA256;
+    const SYMMETRIC_ALGO: SymmetricAlgorithm = SymmetricAlgorithm::AES256;
+    const AEAD_ALGO: AEADAlgorithm = AEADAlgorithm::EAX;
+
+    impl Encrypted {
+        /// Computes the sealing key used to encrypt the memory.
+        fn sealing_key() -> SessionKey {
+            let mut ctx = HASH_ALGO.context()
+                .expect("Mandatory algorithm unsupported");
+            PREKEY.iter().for_each(|page| ctx.update(page));
+            let mut sk: SessionKey = vec![0; 256/8].into();
+            ctx.digest(&mut sk);
+            sk
+        }
+
+        /// Encrypts the given chunk of memory.
+        pub fn new(p: Protected) -> Self {
+            let mut iv =
+                vec![0; AEAD_ALGO.iv_size()
+                            .expect("Mandatory algorithm unsupported")];
+            crate::crypto::random(&mut iv);
+
+            let mut ciphertext = Vec::new();
+            {
+                let mut encryptor =
+                    aead::Encryptor::new(1,
+                                         SYMMETRIC_ALGO,
+                                         AEAD_ALGO,
+                                         4096,
+                                         &iv,
+                                         &Self::sealing_key(),
+                                         &mut ciphertext)
+                    .expect("Mandatory algorithm unsupported");
+                encryptor.write_all(&p).unwrap();
+                encryptor.finish().unwrap();
+            }
+
+            Encrypted {
+                ciphertext: ciphertext.into(),
+                iv: iv.into(),
+            }
+        }
+
+        /// Maps the given function over the temporarily decrypted
+        /// memory.
+        pub fn map<F, T>(&self, mut fun: F) -> T
+            where F: FnMut(&Protected) -> T
+        {
+            let mut plaintext = Vec::new();
+            let mut decryptor =
+                aead::Decryptor::new(1,
+                                     SYMMETRIC_ALGO,
+                                     AEAD_ALGO,
+                                     4096,
+                                     &self.iv,
+                                     &Self::sealing_key(),
+                                     Cursor::new(&self.ciphertext))
+                .expect("Mandatory algorithm unsupported");
+            io::copy(&mut decryptor, &mut plaintext)
+                .expect("Encrypted memory modified or corrupted");
+            let plaintext: Protected = plaintext.into();
+            fun(&plaintext)
+        }
+    }
+}
+
/// Time-constant comparison.
pub fn secure_cmp(a: &[u8], b: &[u8]) -> Ordering {
    let ord1 = a.len().cmp(&b.len());
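The avalanche property described in the commit message can be illustrated with a small, standalone sketch. This is not Sequoia code: the sha2 crate stands in for the HashAlgorithm::SHA256 context used above, and the page count and size mirror the constants in the diff.

    // Standalone sketch of the avalanche argument: the sealing key is a
    // digest over a large pre-key, so a single bit flipped in an
    // attacker's imperfect readout of the pre-key yields an unrelated key.
    use sha2::{Digest, Sha256};

    fn derive_sealing_key(prekey: &[Vec<u8>]) -> [u8; 32] {
        let mut ctx = Sha256::new();
        for page in prekey {
            ctx.update(page);
        }
        let mut key = [0u8; 32];
        key.copy_from_slice(&ctx.finalize());
        key
    }

    fn main() {
        // Four 4096-byte pages stand in for the real, randomly
        // initialized pre-key.
        let mut prekey: Vec<Vec<u8>> = (0..4u8).map(|i| vec![i; 4096]).collect();
        let key = derive_sealing_key(&prekey);

        // Simulate an imperfect readout: a single bit flip somewhere
        // in the pre-key.
        prekey[2][1234] ^= 0x01;
        let recovered = derive_sealing_key(&prekey);

        // The recovered key shares no usable structure with the real
        // one, and nothing indicates where the error occurred.
        assert_ne!(key, recovered);
    }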