summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul Masurel <paul.masurel@gmail.com>2019-04-24 12:31:32 +0900
committerGitHub <noreply@github.com>2019-04-24 12:31:32 +0900
commitb7c2d0de97583f06b65d20af57ff04bb3217876e (patch)
tree9c7ecf25bdf1734c67119d88edf8eb29268fe7b3
parenta228825462ab9b224c86047f964279b93d8d6038 (diff)
Clippy2 (#534)
* Clippy comments. Clippy complains about the cast of &[u32] to a *const __m128i, because of the lack of alignment constraints. This commit passes the OutputBuffer object (which enforces proper alignment) instead of `&[u32]`. * Clippy. Block alignment * Code simplification * Added comment. Code simplification * Removed the extraneous freq block len hack.
-rw-r--r--CHANGELOG.md12
-rw-r--r--src/common/mod.rs1
-rw-r--r--src/docset.rs2
-rw-r--r--src/postings/block_search.rs44
-rw-r--r--src/postings/compression/mod.rs13
-rw-r--r--src/postings/segment_postings.rs81
-rw-r--r--src/query/term_query/mod.rs5
7 files changed, 113 insertions, 45 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1c73b9f..ab7f8cc 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,15 @@
+Tantivy 0.10.0
+====================
+
+
+Minor
+---------
+- Small simplification of the code.
+Calling .freq() or .doc() when .advance() has never been called
+on segment postings should panic from now on.
+
+
+
Tantivy 0.9.0
=====================
*0.9.0 index format is not compatible with the
diff --git a/src/common/mod.rs b/src/common/mod.rs
index 78ec3c4..8f6deaf 100644
--- a/src/common/mod.rs
+++ b/src/common/mod.rs
@@ -13,7 +13,6 @@ pub use self::serialize::{BinarySerializable, FixedSize};
pub use self::vint::{read_u32_vint, serialize_vint_u32, write_u32_vint, VInt};
pub use byteorder::LittleEndian as Endianness;
-
/// Segment's max doc must be `< MAX_DOC_LIMIT`.
///
/// We do not allow segments with more than
diff --git a/src/docset.rs b/src/docset.rs
index acd47be..5d6b6f6 100644
--- a/src/docset.rs
+++ b/src/docset.rs
@@ -1,9 +1,9 @@
use common::BitSet;
+use fastfield::DeleteBitSet;
use std::borrow::Borrow;
use std::borrow::BorrowMut;
use std::cmp::Ordering;
use DocId;
-use fastfield::DeleteBitSet;
/// Expresses the outcome of a call to `DocSet`'s `.skip_next(...)`.
#[derive(PartialEq, Eq, Debug)]
diff --git a/src/postings/block_search.rs b/src/postings/block_search.rs
index 1cea9ff..04da35b 100644
--- a/src/postings/block_search.rs
+++ b/src/postings/block_search.rs
@@ -1,3 +1,5 @@
+use postings::compression::AlignedBuffer;
+
/// This modules define the logic used to search for a doc in a given
/// block. (at most 128 docs)
///
@@ -6,7 +8,7 @@
#[cfg(target_arch = "x86_64")]
mod sse2 {
- use postings::compression::COMPRESSION_BLOCK_SIZE;
+ use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
use std::arch::x86_64::__m128i as DataType;
use std::arch::x86_64::_mm_add_epi32 as op_add;
use std::arch::x86_64::_mm_cmplt_epi32 as op_lt;
@@ -23,9 +25,9 @@ mod sse2 {
///
/// There is no early exit here. We simply count the
/// number of elements that are `< target`.
- pub fn linear_search_sse2_128(arr: &[u32], target: u32) -> usize {
+ pub(crate) fn linear_search_sse2_128(arr: &AlignedBuffer, target: u32) -> usize {
unsafe {
- let ptr = arr.as_ptr() as *const DataType;
+ let ptr = arr as *const AlignedBuffer as *const DataType;
let vkey = set1(target as i32);
let mut cnt = set0();
// We work over 4 `__m128i` at a time.
@@ -47,14 +49,16 @@ mod sse2 {
#[cfg(test)]
mod test {
use super::linear_search_sse2_128;
+ use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
#[test]
fn test_linear_search_sse2_128_u32() {
- for i in 0..23 {
- dbg!(i);
- let arr: Vec<u32> = (0..128).map(|el| el * 2 + 1 << 18).collect();
- assert_eq!(linear_search_sse2_128(&arr, arr[64] + 1), 65);
+ let mut block = [0u32; COMPRESSION_BLOCK_SIZE];
+ for el in 0u32..128u32 {
+ block[el as usize] = el * 2 + 1 << 18;
}
+ let target = block[64] + 1;
+ assert_eq!(linear_search_sse2_128(&AlignedBuffer(block), target), 65);
}
}
}
@@ -127,15 +131,21 @@ impl BlockSearcher {
/// then we use a different implementation that does an exhaustive linear search over
/// the full block whenever the block is full (`len == 128`). It is surprisingly faster, most likely because of the lack
/// of branch.
- pub fn search_in_block(self, block_docs: &[u32], start: usize, target: u32) -> usize {
+ pub(crate) fn search_in_block(
+ self,
+ block_docs: &AlignedBuffer,
+ len: usize,
+ start: usize,
+ target: u32,
+ ) -> usize {
#[cfg(target_arch = "x86_64")]
{
use postings::compression::COMPRESSION_BLOCK_SIZE;
- if self == BlockSearcher::SSE2 && block_docs.len() == COMPRESSION_BLOCK_SIZE {
+ if self == BlockSearcher::SSE2 && len == COMPRESSION_BLOCK_SIZE {
return sse2::linear_search_sse2_128(block_docs, target);
}
}
- start + galloping(&block_docs[start..], target)
+ start + galloping(&block_docs.0[start..len], target)
}
}
@@ -156,6 +166,7 @@ mod tests {
use super::exponential_search;
use super::linear_search;
use super::BlockSearcher;
+ use postings::compression::{AlignedBuffer, COMPRESSION_BLOCK_SIZE};
#[test]
fn test_linear_search() {
@@ -184,8 +195,19 @@ mod tests {
fn util_test_search_in_block(block_searcher: BlockSearcher, block: &[u32], target: u32) {
let cursor = search_in_block_trivial_but_slow(block, target);
+ assert!(block.len() < COMPRESSION_BLOCK_SIZE);
+ let mut output_buffer = [u32::max_value(); COMPRESSION_BLOCK_SIZE];
+ output_buffer[..block.len()].copy_from_slice(block);
for i in 0..cursor {
- assert_eq!(block_searcher.search_in_block(block, i, target), cursor);
+ assert_eq!(
+ block_searcher.search_in_block(
+ &AlignedBuffer(output_buffer),
+ block.len(),
+ i,
+ target
+ ),
+ cursor
+ );
}
}
diff --git a/src/postings/compression/mod.rs b/src/postings/compression/mod.rs
index f35b6cd..d1bed4b 100644
--- a/src/postings/compression/mod.rs
+++ b/src/postings/compression/mod.rs
@@ -46,11 +46,11 @@ impl BlockEncoder {
/// We ensure that the OutputBuffer is align on 128 bits
/// in order to run SSE2 linear search on it.
#[repr(align(128))]
-struct OutputBuffer([u32; COMPRESSION_BLOCK_SIZE + 1]);
+pub(crate) struct AlignedBuffer(pub [u32; COMPRESSION_BLOCK_SIZE]);
pub struct BlockDecoder {
bitpacker: BitPacker4x,
- output: OutputBuffer,
+ output: AlignedBuffer,
pub output_len: usize,
}
@@ -60,11 +60,9 @@ impl BlockDecoder {
}
pub fn with_val(val: u32) -> BlockDecoder {
- let mut output = [val; COMPRESSION_BLOCK_SIZE + 1];
- output[COMPRESSION_BLOCK_SIZE] = 0u32;
BlockDecoder {
bitpacker: BitPacker4x::new(),
- output: OutputBuffer(output),
+ output: AlignedBuffer([val; COMPRESSION_BLOCK_SIZE]),
output_len: 0,
}
}
@@ -92,6 +90,11 @@ impl BlockDecoder {
}
#[inline]
+ pub(crate) fn output_aligned(&self) -> (&AlignedBuffer, usize) {
+ (&self.output, self.output_len)
+ }
+
+ #[inline]
pub fn output(&self, idx: usize) -> u32 {
self.output.0[idx]
}
diff --git a/src/postings/segment_postings.rs b/src/postings/segment_postings.rs
index b12c0b7..b189d02 100644
--- a/src/postings/segment_postings.rs
+++ b/src/postings/segment_postings.rs
@@ -4,7 +4,7 @@ use common::{BinarySerializable, VInt};
use docset::{DocSet, SkipResult};
use owned_read::OwnedRead;
use positions::PositionReader;
-use postings::compression::compressed_block_size;
+use postings::compression::{compressed_block_size, AlignedBuffer};
use postings::compression::{BlockDecoder, VIntDecoder, COMPRESSION_BLOCK_SIZE};
use postings::serializer::PostingsSerializer;
use postings::BlockSearcher;
@@ -130,9 +130,11 @@ impl DocSet for SegmentPostings {
// next needs to be called a first time to point to the correct element.
#[inline]
fn advance(&mut self) -> bool {
- if self.position_computer.is_some() {
+ if self.position_computer.is_some() && self.cur < COMPRESSION_BLOCK_SIZE {
let term_freq = self.term_freq() as usize;
- self.position_computer.as_mut().unwrap().add_skip(term_freq);
+ if let Some(position_computer) = self.position_computer.as_mut() {
+ position_computer.add_skip(term_freq);
+ }
}
self.cur += 1;
if self.cur >= self.block_cursor.block_len() {
@@ -167,7 +169,6 @@ impl DocSet for SegmentPostings {
// skip blocks until one that might contain the target
// check if we need to go to the next block
- let need_positions = self.position_computer.is_some();
let mut sum_freqs_skipped: u32 = 0;
if !self
.block_cursor
@@ -181,7 +182,7 @@ impl DocSet for SegmentPostings {
// we are not in the right block.
//
// First compute all of the freqs skipped from the current block.
- if need_positions {
+ if self.position_computer.is_some() {
sum_freqs_skipped = self.block_cursor.freqs()[self.cur..].iter().sum();
match self.block_cursor.skip_to(target) {
BlockSegmentPostingsSkipResult::Success(block_skip_freqs) => {
@@ -200,24 +201,21 @@ impl DocSet for SegmentPostings {
self.cur = 0;
}
+ let cur = self.cur;
+
// we're in the right block now, start with an exponential search
- let block_docs = self.block_cursor.docs();
+ let (output, len) = self.block_cursor.docs_aligned();
let new_cur = self
.block_searcher
- .search_in_block(&block_docs, self.cur, target);
- if need_positions {
- sum_freqs_skipped += self.block_cursor.freqs()[self.cur..new_cur]
- .iter()
- .sum::<u32>();
- self.position_computer
- .as_mut()
- .unwrap()
- .add_skip(sum_freqs_skipped as usize);
+ .search_in_block(&output, len, cur, target);
+ if let Some(position_computer) = self.position_computer.as_mut() {
+ sum_freqs_skipped += self.block_cursor.freqs()[cur..new_cur].iter().sum::<u32>();
+ position_computer.add_skip(sum_freqs_skipped as usize);
}
self.cur = new_cur;
// `doc` is now the first element >= `target`
- let doc = block_docs[new_cur];
+ let doc = output.0[new_cur];
debug_assert!(doc >= target);
if doc == target {
SkipResult::Reached
@@ -227,12 +225,16 @@ impl DocSet for SegmentPostings {
}
/// Return the current document's `DocId`.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if called without having called advance before.
#[inline]
fn doc(&self) -> DocId {
let docs = self.block_cursor.docs();
debug_assert!(
self.cur < docs.len(),
- "Have you forgotten to call `.advance()` at least once before calling .doc()."
+ "Have you forgotten to call `.advance()` at least once before calling `.doc()` ."
);
docs[self.cur]
}
@@ -264,17 +266,33 @@ impl HasLen for SegmentPostings {
}
impl Postings for SegmentPostings {
+ /// Returns the frequency associated to the current document.
+ /// If the schema is set up so that no frequency have been encoded,
+ /// this method should always return 1.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if called without having called advance before.
fn term_freq(&self) -> u32 {
+ debug_assert!(
+ // Here we do not use the len of `freqs()`
+ // because it is actually ok to request for the freq of doc
+ // even if no frequency were encoded for the field.
+ //
+ // In that case we hit the block just as if the frequency had been
+ // decoded. The block is simply prefilled by the value 1.
+ self.cur < COMPRESSION_BLOCK_SIZE,
+ "Have you forgotten to call `.advance()` at least once before calling \
+ `.term_freq()`."
+ );
self.block_cursor.freq(self.cur)
}
fn positions_with_offset(&mut self, offset: u32, output: &mut Vec<u32>) {
- if self.position_computer.is_some() {
- output.resize(self.term_freq() as usize, 0u32);
- self.position_computer
- .as_mut()
- .unwrap()
- .positions_with_offset(offset, &mut output[..])
+ let term_freq = self.term_freq() as usize;
+ if let Some(position_comp) = self.position_computer.as_mut() {
+ output.resize(term_freq, 0u32);
+ position_comp.positions_with_offset(offset, &mut output[..]);
} else {
output.clear();
}
@@ -396,6 +414,10 @@ impl BlockSegmentPostings {
self.doc_decoder.output_array()
}
+ pub(crate) fn docs_aligned(&self) -> (&AlignedBuffer, usize) {
+ self.doc_decoder.output_aligned()
+ }
+
/// Return the document at index `idx` of the block.
#[inline]
pub fn doc(&self, idx: usize) -> u32 {
@@ -592,6 +614,7 @@ mod tests {
use common::HasLen;
use core::Index;
use docset::DocSet;
+ use postings::postings::Postings;
use schema::IndexRecordOption;
use schema::Schema;
use schema::Term;
@@ -609,6 +632,18 @@ mod tests {
}
#[test]
+ #[should_panic(expected = "Have you forgotten to call `.advance()`")]
+ fn test_panic_if_doc_called_before_advance() {
+ SegmentPostings::empty().doc();
+ }
+
+ #[test]
+ #[should_panic(expected = "Have you forgotten to call `.advance()`")]
+ fn test_panic_if_freq_called_before_advance() {
+ SegmentPostings::empty().term_freq();
+ }
+
+ #[test]
fn test_empty_block_segment_postings() {
let mut postings = BlockSegmentPostings::empty();
assert!(!postings.advance());
diff --git a/src/query/term_query/mod.rs b/src/query/term_query/mod.rs
index ab89ca2..23db31f 100644
--- a/src/query/term_query/mod.rs
+++ b/src/query/term_query/mod.rs
@@ -98,16 +98,13 @@ mod tests {
}
}
-
#[test]
fn test_term_query_count_when_there_are_deletes() {
let mut schema_builder = Schema::builder();
let text_field = schema_builder.add_text_field("text", TEXT);
let schema = schema_builder.build();
let index = Index::create_in_ram(schema);
- let mut index_writer = index
- .writer_with_num_threads(1, 5_000_000)
- .unwrap();
+ let mut index_writer = index.writer_with_num_threads(1, 5_000_000).unwrap();
index_writer.add_document(doc!(text_field=>"a b"));
index_writer.add_document(doc!(text_field=>"a c"));
index_writer.delete_term(Term::from_field_text(text_field, "b"));