/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #ifndef THETA_SKETCH_IMPL_HPP_ #define THETA_SKETCH_IMPL_HPP_ #include #include #include #include "serde.hpp" #include "binomial_bounds.hpp" #include "theta_helpers.hpp" #include "count_zeros.hpp" #include "bit_packing.hpp" namespace datasketches { template bool base_theta_sketch_alloc::is_estimation_mode() const { return get_theta64() < theta_constants::MAX_THETA && !is_empty(); } template double base_theta_sketch_alloc::get_theta() const { return static_cast(get_theta64()) / static_cast(theta_constants::MAX_THETA); } template double base_theta_sketch_alloc::get_estimate() const { return get_num_retained() / get_theta(); } template double base_theta_sketch_alloc::get_lower_bound(uint8_t num_std_devs) const { if (!is_estimation_mode()) return get_num_retained(); return binomial_bounds::get_lower_bound(get_num_retained(), get_theta(), num_std_devs); } template double base_theta_sketch_alloc::get_upper_bound(uint8_t num_std_devs) const { if (!is_estimation_mode()) return get_num_retained(); return binomial_bounds::get_upper_bound(get_num_retained(), get_theta(), num_std_devs); } template string base_theta_sketch_alloc::to_string(bool print_details) const { // Using a temporary stream for implementation here does not comply with AllocatorAwareContainer requirements. // The stream does not support passing an allocator instance, and alternatives are complicated. std::ostringstream os; os << "### Theta sketch summary:" << std::endl; os << " num retained entries : " << this->get_num_retained() << std::endl; os << " seed hash : " << this->get_seed_hash() << std::endl; os << " empty? : " << (this->is_empty() ? "true" : "false") << std::endl; os << " ordered? : " << (this->is_ordered() ? "true" : "false") << std::endl; os << " estimation mode? : " << (this->is_estimation_mode() ? "true" : "false") << std::endl; os << " theta (fraction) : " << this->get_theta() << std::endl; os << " theta (raw 64-bit) : " << this->get_theta64() << std::endl; os << " estimate : " << this->get_estimate() << std::endl; os << " lower bound 95% conf : " << this->get_lower_bound(2) << std::endl; os << " upper bound 95% conf : " << this->get_upper_bound(2) << std::endl; print_specifics(os); os << "### End sketch summary" << std::endl; if (print_details) { print_items(os); } return string(os.str().c_str(), this->get_allocator()); } template void theta_sketch_alloc::print_items(std::ostringstream& os) const { os << "### Retained entries" << std::endl; for (const auto& hash: *this) { os << hash << std::endl; } os << "### End retained entries" << std::endl; } // update sketch template update_theta_sketch_alloc::update_theta_sketch_alloc(uint8_t lg_cur_size, uint8_t lg_nom_size, resize_factor rf, float p, uint64_t theta, uint64_t seed, const A& allocator): table_(lg_cur_size, lg_nom_size, rf, p, theta, seed, allocator) {} template A update_theta_sketch_alloc::get_allocator() const { return table_.allocator_; } template bool update_theta_sketch_alloc::is_empty() const { return table_.is_empty_; } template bool update_theta_sketch_alloc::is_ordered() const { return table_.num_entries_ > 1 ? false : true; } template uint64_t update_theta_sketch_alloc::get_theta64() const { return is_empty() ? theta_constants::MAX_THETA : table_.theta_; } template uint32_t update_theta_sketch_alloc::get_num_retained() const { return table_.num_entries_; } template uint16_t update_theta_sketch_alloc::get_seed_hash() const { return compute_seed_hash(table_.seed_); } template uint8_t update_theta_sketch_alloc::get_lg_k() const { return table_.lg_nom_size_; } template auto update_theta_sketch_alloc::get_rf() const -> resize_factor { return table_.rf_; } template void update_theta_sketch_alloc::update(uint64_t value) { update(&value, sizeof(value)); } template void update_theta_sketch_alloc::update(int64_t value) { update(&value, sizeof(value)); } template void update_theta_sketch_alloc::update(uint32_t value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(int32_t value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(uint16_t value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(int16_t value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(uint8_t value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(int8_t value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(double value) { update(canonical_double(value)); } template void update_theta_sketch_alloc::update(float value) { update(static_cast(value)); } template void update_theta_sketch_alloc::update(const std::string& value) { if (value.empty()) return; update(value.c_str(), value.length()); } template void update_theta_sketch_alloc::update(const void* data, size_t length) { const uint64_t hash = table_.hash_and_screen(data, length); if (hash == 0) return; auto result = table_.find(hash); if (!result.second) { table_.insert(result.first, hash); } } template void update_theta_sketch_alloc::trim() { table_.trim(); } template void update_theta_sketch_alloc::reset() { table_.reset(); } template auto update_theta_sketch_alloc::begin() -> iterator { return iterator(table_.entries_, 1 << table_.lg_cur_size_, 0); } template auto update_theta_sketch_alloc::end() -> iterator { return iterator(nullptr, 0, 1 << table_.lg_cur_size_); } template auto update_theta_sketch_alloc::begin() const -> const_iterator { return const_iterator(table_.entries_, 1 << table_.lg_cur_size_, 0); } template auto update_theta_sketch_alloc::end() const -> const_iterator { return const_iterator(nullptr, 0, 1 << table_.lg_cur_size_); } template compact_theta_sketch_alloc update_theta_sketch_alloc::compact(bool ordered) const { return compact_theta_sketch_alloc(*this, ordered); } template void update_theta_sketch_alloc::print_specifics(std::ostringstream& os) const { os << " lg nominal size : " << static_cast(table_.lg_nom_size_) << std::endl; os << " lg current size : " << static_cast(table_.lg_cur_size_) << std::endl; os << " resize factor : " << (1 << table_.rf_) << std::endl; } // builder template update_theta_sketch_alloc::builder::builder(const A& allocator): theta_base_builder(allocator) {} template update_theta_sketch_alloc update_theta_sketch_alloc::builder::build() const { return update_theta_sketch_alloc(this->starting_lg_size(), this->lg_k_, this->rf_, this->p_, this->starting_theta(), this->seed_, this->allocator_); } // compact sketch template template compact_theta_sketch_alloc::compact_theta_sketch_alloc(const Other& other, bool ordered): is_empty_(other.is_empty()), is_ordered_(other.is_ordered() || ordered), seed_hash_(other.get_seed_hash()), theta_(other.get_theta64()), entries_(other.get_allocator()) { if (!other.is_empty()) { entries_.reserve(other.get_num_retained()); std::copy(other.begin(), other.end(), std::back_inserter(entries_)); if (ordered && !other.is_ordered()) std::sort(entries_.begin(), entries_.end()); } } template compact_theta_sketch_alloc::compact_theta_sketch_alloc(bool is_empty, bool is_ordered, uint16_t seed_hash, uint64_t theta, std::vector&& entries): is_empty_(is_empty), is_ordered_(is_ordered || (entries.size() <= 1ULL)), seed_hash_(seed_hash), theta_(theta), entries_(std::move(entries)) {} template A compact_theta_sketch_alloc::get_allocator() const { return entries_.get_allocator(); } template bool compact_theta_sketch_alloc::is_empty() const { return is_empty_; } template bool compact_theta_sketch_alloc::is_ordered() const { return is_ordered_; } template uint64_t compact_theta_sketch_alloc::get_theta64() const { return theta_; } template uint32_t compact_theta_sketch_alloc::get_num_retained() const { return static_cast(entries_.size()); } template uint16_t compact_theta_sketch_alloc::get_seed_hash() const { return seed_hash_; } template auto compact_theta_sketch_alloc::begin() -> iterator { return iterator(entries_.data(), static_cast(entries_.size()), 0); } template auto compact_theta_sketch_alloc::end() -> iterator { return iterator(nullptr, 0, static_cast(entries_.size())); } template auto compact_theta_sketch_alloc::begin() const -> const_iterator { return const_iterator(entries_.data(), static_cast(entries_.size()), 0); } template auto compact_theta_sketch_alloc::end() const -> const_iterator { return const_iterator(nullptr, 0, static_cast(entries_.size())); } template void compact_theta_sketch_alloc::print_specifics(std::ostringstream&) const {} template void compact_theta_sketch_alloc::serialize(std::ostream& os) const { const uint8_t preamble_longs = this->is_estimation_mode() ? 3 : this->is_empty() || entries_.size() == 1 ? 1 : 2; write(os, preamble_longs); write(os, UNCOMPRESSED_SERIAL_VERSION); write(os, SKETCH_TYPE); write(os, 0); // unused const uint8_t flags_byte( (1 << flags::IS_COMPACT) | (1 << flags::IS_READ_ONLY) | (this->is_empty() ? 1 << flags::IS_EMPTY : 0) | (this->is_ordered() ? 1 << flags::IS_ORDERED : 0) ); write(os, flags_byte); write(os, get_seed_hash()); if (preamble_longs > 1) { write(os, static_cast(entries_.size())); write(os, 0); // unused } if (this->is_estimation_mode()) write(os, this->theta_); if (entries_.size() > 0) write(os, entries_.data(), entries_.size() * sizeof(uint64_t)); } template auto compact_theta_sketch_alloc::serialize(unsigned header_size_bytes) const -> vector_bytes { const uint8_t preamble_longs = this->is_estimation_mode() ? 3 : this->is_empty() || entries_.size() == 1 ? 1 : 2; const size_t size = header_size_bytes + sizeof(uint64_t) * preamble_longs + sizeof(uint64_t) * entries_.size(); vector_bytes bytes(size, 0, entries_.get_allocator()); uint8_t* ptr = bytes.data() + header_size_bytes; *ptr++ = preamble_longs; *ptr++ = UNCOMPRESSED_SERIAL_VERSION; *ptr++ = SKETCH_TYPE; ptr += sizeof(uint16_t); // unused const uint8_t flags_byte( (1 << flags::IS_COMPACT) | (1 << flags::IS_READ_ONLY) | (this->is_empty() ? 1 << flags::IS_EMPTY : 0) | (this->is_ordered() ? 1 << flags::IS_ORDERED : 0) ); *ptr++ = flags_byte; ptr += copy_to_mem(get_seed_hash(), ptr); if (preamble_longs > 1) { ptr += copy_to_mem(static_cast(entries_.size()), ptr); ptr += sizeof(uint32_t); // unused } if (this->is_estimation_mode()) ptr += copy_to_mem(theta_, ptr); if (entries_.size() > 0) ptr += copy_to_mem(entries_.data(), ptr, entries_.size() * sizeof(uint64_t)); return bytes; } template bool compact_theta_sketch_alloc::is_suitable_for_compression() const { if (!this->is_ordered() || entries_.size() == 0 || (entries_.size() == 1 && !this->is_estimation_mode())) return false; return true; } template void compact_theta_sketch_alloc::serialize_compressed(std::ostream& os) const { if (is_suitable_for_compression()) return serialize_version_4(os); return serialize(os); } template auto compact_theta_sketch_alloc::serialize_compressed(unsigned header_size_bytes) const -> vector_bytes { if (is_suitable_for_compression()) return serialize_version_4(header_size_bytes); return serialize(header_size_bytes); } template uint8_t compact_theta_sketch_alloc::compute_min_leading_zeros() const { // compression is based on leading zeros in deltas between ordered hash values // assumes ordered sketch uint64_t previous = 0; uint64_t ored = 0; for (const uint64_t entry: entries_) { const uint64_t delta = entry - previous; ored |= delta; previous = entry; } return count_leading_zeros_in_u64(ored); } template void compact_theta_sketch_alloc::serialize_version_4(std::ostream& os) const { const uint8_t preamble_longs = this->is_estimation_mode() ? 2 : 1; const uint8_t entry_bits = 64 - compute_min_leading_zeros(); // store num_entries as whole bytes since whole-byte blocks will follow (most probably) const uint8_t num_entries_bytes = whole_bytes_to_hold_bits(32 - count_leading_zeros_in_u32(static_cast(entries_.size()))); write(os, preamble_longs); write(os, COMPRESSED_SERIAL_VERSION); write(os, SKETCH_TYPE); write(os, entry_bits); write(os, num_entries_bytes); const uint8_t flags_byte( (1 << flags::IS_COMPACT) | (1 << flags::IS_READ_ONLY) | (1 << flags::IS_ORDERED) ); write(os, flags_byte); write(os, get_seed_hash()); if (this->is_estimation_mode()) write(os, this->theta_); uint32_t num_entries = static_cast(entries_.size()); for (unsigned i = 0; i < num_entries_bytes; ++i) { write(os, num_entries & 0xff); num_entries >>= 8; } uint64_t previous = 0; uint64_t deltas[8]; vector_bytes buffer(entry_bits, 0, entries_.get_allocator()); // block of 8 entries takes entry_bits bytes // pack blocks of 8 deltas unsigned i; for (i = 0; i + 7 < entries_.size(); i += 8) { for (unsigned j = 0; j < 8; ++j) { deltas[j] = entries_[i + j] - previous; previous = entries_[i + j]; } pack_bits_block8(deltas, buffer.data(), entry_bits); write(os, buffer.data(), buffer.size()); } // pack extra deltas if fewer than 8 of them left if (i < entries_.size()) { uint8_t offset = 0; uint8_t* ptr = buffer.data(); for (; i < entries_.size(); ++i) { const uint64_t delta = entries_[i] - previous; previous = entries_[i]; offset = pack_bits(delta, entry_bits, ptr, offset); } write(os, buffer.data(), ptr - buffer.data()); } } template auto compact_theta_sketch_alloc::serialize_version_4(unsigned header_size_bytes) const -> vector_bytes { const uint8_t preamble_longs = this->is_estimation_mode() ? 2 : 1; const uint8_t entry_bits = 64 - compute_min_leading_zeros(); const size_t compressed_bits = entry_bits * entries_.size(); // store num_entries as whole bytes since whole-byte blocks will follow (most probably) const uint8_t num_entries_bytes = whole_bytes_to_hold_bits(32 - count_leading_zeros_in_u32(static_cast(entries_.size()))); const size_t size = header_size_bytes + sizeof(uint64_t) * preamble_longs + num_entries_bytes + whole_bytes_to_hold_bits(compressed_bits); vector_bytes bytes(size, 0, entries_.get_allocator()); uint8_t* ptr = bytes.data() + header_size_bytes; *ptr++ = preamble_longs; *ptr++ = COMPRESSED_SERIAL_VERSION; *ptr++ = SKETCH_TYPE; *ptr++ = entry_bits; *ptr++ = num_entries_bytes; const uint8_t flags_byte( (1 << flags::IS_COMPACT) | (1 << flags::IS_READ_ONLY) | (1 << flags::IS_ORDERED) ); *ptr++ = flags_byte; ptr += copy_to_mem(get_seed_hash(), ptr); if (this->is_estimation_mode()) { ptr += copy_to_mem(theta_, ptr); } uint32_t num_entries = static_cast(entries_.size()); for (unsigned i = 0; i < num_entries_bytes; ++i) { *ptr++ = num_entries & 0xff; num_entries >>= 8; } uint64_t previous = 0; uint64_t deltas[8]; // pack blocks of 8 deltas unsigned i; for (i = 0; i + 7 < entries_.size(); i += 8) { for (unsigned j = 0; j < 8; ++j) { deltas[j] = entries_[i + j] - previous; previous = entries_[i + j]; } pack_bits_block8(deltas, ptr, entry_bits); ptr += entry_bits; } // pack extra deltas if fewer than 8 of them left uint8_t offset = 0; for (; i < entries_.size(); ++i) { const uint64_t delta = entries_[i] - previous; previous = entries_[i]; offset = pack_bits(delta, entry_bits, ptr, offset); } return bytes; } template compact_theta_sketch_alloc compact_theta_sketch_alloc::deserialize(std::istream& is, uint64_t seed, const A& allocator) { const auto preamble_longs = read(is); const auto serial_version = read(is); const auto type = read(is); checker::check_sketch_type(type, SKETCH_TYPE); switch (serial_version) { case 4: return deserialize_v4(preamble_longs, is, seed, allocator); case 3: return deserialize_v3(preamble_longs, is, seed, allocator); case 1: return deserialize_v1(preamble_longs, is, seed, allocator); case 2: return deserialize_v2(preamble_longs, is, seed, allocator); default: throw std::invalid_argument("unexpected sketch serialization version " + std::to_string(serial_version)); } } template compact_theta_sketch_alloc compact_theta_sketch_alloc::deserialize_v1( uint8_t, std::istream& is, uint64_t seed, const A& allocator) { const auto seed_hash = compute_seed_hash(seed); read(is); // unused read(is); // unused const auto num_entries = read(is); read(is); //unused const auto theta = read(is); std::vector entries(num_entries, 0, allocator); bool is_empty = (num_entries == 0) && (theta == theta_constants::MAX_THETA); if (!is_empty) read(is, entries.data(), sizeof(uint64_t) * entries.size()); if (!is.good()) throw std::runtime_error("error reading from std::istream"); return compact_theta_sketch_alloc(is_empty, true, seed_hash, theta, std::move(entries)); } template compact_theta_sketch_alloc compact_theta_sketch_alloc::deserialize_v2( uint8_t preamble_longs, std::istream& is, uint64_t seed, const A& allocator) { read(is); // unused read(is); // unused const uint16_t seed_hash = read(is); checker::check_seed_hash(seed_hash, compute_seed_hash(seed)); if (preamble_longs == 1) { if (!is.good()) throw std::runtime_error("error reading from std::istream"); std::vector entries(0, 0, allocator); return compact_theta_sketch_alloc(true, true, seed_hash, theta_constants::MAX_THETA, std::move(entries)); } else if (preamble_longs == 2) { const uint32_t num_entries = read(is); read(is); // unused std::vector entries(num_entries, 0, allocator); if (num_entries == 0) { return compact_theta_sketch_alloc(true, true, seed_hash, theta_constants::MAX_THETA, std::move(entries)); } read(is, entries.data(), entries.size() * sizeof(uint64_t)); if (!is.good()) throw std::runtime_error("error reading from std::istream"); return compact_theta_sketch_alloc(false, true, seed_hash, theta_constants::MAX_THETA, std::move(entries)); } else if (preamble_longs == 3) { const uint32_t num_entries = read(is); read(is); // unused const auto theta = read(is); bool is_empty = (num_entries == 0) && (theta == theta_constants::MAX_THETA); std::vector entries(num_entries, 0, allocator); if (is_empty) { if (!is.good()) throw std::runtime_error("error reading from std::istream"); return compact_theta_sketch_alloc(true, true, seed_hash, theta, std::move(entries)); } else { read(is, entries.data(), sizeof(uint64_t) * entries.size()); if (!is.good()) throw std::runtime_error("error reading from std::istream"); return compact_theta_sketch_alloc(false, true, seed_hash, theta, std::move(entries)); } } else { throw std::invalid_argument(std::to_string(preamble_longs) + " longs of premable, but expected 1, 2, or 3"); } } template compact_theta_sketch_alloc compact_theta_sketch_alloc::deserialize_v3( uint8_t preamble_longs, std::istream& is, uint64_t seed, const A& allocator) { read(is); // unused const auto flags_byte = read(is); const auto seed_hash = read(is); const bool is_empty = flags_byte & (1 << flags::IS_EMPTY); if (!is_empty) checker::check_seed_hash(seed_hash, compute_seed_hash(seed)); uint64_t theta = theta_constants::MAX_THETA; uint32_t num_entries = 0; if (!is_empty) { if (preamble_longs == 1) { num_entries = 1; } else { num_entries = read(is); read(is); // unused if (preamble_longs > 2) theta = read(is); } } std::vector entries(num_entries, 0, allocator); if (!is_empty) read(is, entries.data(), sizeof(uint64_t) * entries.size()); const bool is_ordered = flags_byte & (1 << flags::IS_ORDERED); if (!is.good()) throw std::runtime_error("error reading from std::istream"); return compact_theta_sketch_alloc(is_empty, is_ordered, seed_hash, theta, std::move(entries)); } template compact_theta_sketch_alloc compact_theta_sketch_alloc::deserialize_v4( uint8_t preamble_longs, std::istream& is, uint64_t seed, const A& allocator) { const auto entry_bits = read(is); const auto num_entries_bytes = read(is); const auto flags_byte = read(is); const auto seed_hash = read(is); const bool is_empty = flags_byte & (1 << flags::IS_EMPTY); if (!is_empty) checker::check_seed_hash(seed_hash, compute_seed_hash(seed)); uint64_t theta = theta_constants::MAX_THETA; if (preamble_longs > 1) theta = read(is); uint32_t num_entries = 0; for (unsigned i = 0; i < num_entries_bytes; ++i) { num_entries |= read(is) << (i << 3); } vector_bytes buffer(entry_bits, 0, allocator); // block of 8 entries takes entry_bits bytes std::vector entries(num_entries, 0, allocator); // unpack blocks of 8 deltas unsigned i; for (i = 0; i + 7 < num_entries; i += 8) { read(is, buffer.data(), buffer.size()); unpack_bits_block8(&entries[i], buffer.data(), entry_bits); } // unpack extra deltas if fewer than 8 of them left if (i < num_entries) read(is, buffer.data(), whole_bytes_to_hold_bits((num_entries - i) * entry_bits)); if (!is.good()) throw std::runtime_error("error reading from std::istream"); const uint8_t* ptr = buffer.data(); uint8_t offset = 0; for (; i < num_entries; ++i) { offset = unpack_bits(entries[i], entry_bits, ptr, offset); } // undo deltas uint64_t previous = 0; for (i = 0; i < num_entries; ++i) { entries[i] += previous; previous = entries[i]; } const bool is_ordered = flags_byte & (1 << flags::IS_ORDERED); return compact_theta_sketch_alloc(is_empty, is_ordered, seed_hash, theta, std::move(entries)); } template compact_theta_sketch_alloc compact_theta_sketch_alloc::deserialize(const void* bytes, size_t size, uint64_t seed, const A& allocator) { auto data = compact_theta_sketch_parser::parse(bytes, size, seed, false); if (data.entry_bits == 64) { // versions 1 to 3 const uint64_t* entries = reinterpret_cast(data.entries_start_ptr); return compact_theta_sketch_alloc(data.is_empty, data.is_ordered, data.seed_hash, data.theta, std::vector(entries, entries + data.num_entries, allocator)); } else { // version 4 std::vector entries(data.num_entries, 0, allocator); const uint8_t* ptr = reinterpret_cast(data.entries_start_ptr); // unpack blocks of 8 deltas unsigned i; for (i = 0; i + 7 < data.num_entries; i += 8) { unpack_bits_block8(&entries[i], ptr, data.entry_bits); ptr += data.entry_bits; } // unpack extra deltas if fewer than 8 of them left uint8_t offset = 0; for (; i < data.num_entries; ++i) { offset = unpack_bits(entries[i], data.entry_bits, ptr, offset); } // undo deltas uint64_t previous = 0; for (i = 0; i < data.num_entries; ++i) { entries[i] += previous; previous = entries[i]; } return compact_theta_sketch_alloc(data.is_empty, data.is_ordered, data.seed_hash, data.theta, std::move(entries)); } } // wrapped compact sketch template wrapped_compact_theta_sketch_alloc::wrapped_compact_theta_sketch_alloc(const data_type& data): data_(data) {} template const wrapped_compact_theta_sketch_alloc wrapped_compact_theta_sketch_alloc::wrap(const void* bytes, size_t size, uint64_t seed, bool dump_on_error) { return wrapped_compact_theta_sketch_alloc(compact_theta_sketch_parser::parse(bytes, size, seed, dump_on_error)); } template A wrapped_compact_theta_sketch_alloc::get_allocator() const { return A(); } template bool wrapped_compact_theta_sketch_alloc::is_empty() const { return data_.is_empty; } template bool wrapped_compact_theta_sketch_alloc::is_ordered() const { return data_.is_ordered; } template uint64_t wrapped_compact_theta_sketch_alloc::get_theta64() const { return data_.theta; } template uint32_t wrapped_compact_theta_sketch_alloc::get_num_retained() const { return data_.num_entries; } template uint16_t wrapped_compact_theta_sketch_alloc::get_seed_hash() const { return data_.seed_hash; } template auto wrapped_compact_theta_sketch_alloc::begin() const -> const_iterator { return const_iterator(data_.entries_start_ptr, data_.entry_bits, data_.num_entries, 0); } template auto wrapped_compact_theta_sketch_alloc::end() const -> const_iterator { return const_iterator(data_.entries_start_ptr, data_.entry_bits, data_.num_entries, data_.num_entries); } template void wrapped_compact_theta_sketch_alloc::print_specifics(std::ostringstream&) const {} template void wrapped_compact_theta_sketch_alloc::print_items(std::ostringstream& os) const { os << "### Retained entries" << std::endl; for (const auto hash: *this) { os << hash << std::endl; } os << "### End retained entries" << std::endl; } // assumes index == 0 or index == num_entries template wrapped_compact_theta_sketch_alloc::const_iterator::const_iterator( const void* ptr, uint8_t entry_bits, uint32_t num_entries, uint32_t index): ptr_(ptr), entry_bits_(entry_bits), num_entries_(num_entries), index_(index), previous_(0), is_block_mode_(num_entries_ >= 8), buf_i_(0), offset_(0) { if (entry_bits == 64) { // no compression ptr_ = reinterpret_cast(ptr) + index; } else if (index < num_entries) { if (is_block_mode_) { unpack_bits_block8(buffer_, reinterpret_cast(ptr_), entry_bits_); ptr_ = reinterpret_cast(ptr_) + entry_bits_; for (int i = 0; i < 8; ++i) { buffer_[i] += previous_; previous_ = buffer_[i]; } } else { offset_ = unpack_bits(buffer_[0], entry_bits_, reinterpret_cast(ptr_), offset_); buffer_[0] += previous_; previous_ = buffer_[0]; } } } template auto wrapped_compact_theta_sketch_alloc::const_iterator::operator++() -> const_iterator& { if (entry_bits_ == 64) { // no compression ptr_ = reinterpret_cast(ptr_) + 1; return *this; } ++index_; if (index_ < num_entries_) { if (is_block_mode_) { ++buf_i_; if (buf_i_ == 8) { buf_i_ = 0; if (index_ + 8 < num_entries_) { unpack_bits_block8(buffer_, reinterpret_cast(ptr_), entry_bits_); ptr_ = reinterpret_cast(ptr_) + entry_bits_; for (int i = 0; i < 8; ++i) { buffer_[i] += previous_; previous_ = buffer_[i]; } } else { is_block_mode_ = false; offset_ = unpack_bits(buffer_[0], entry_bits_, reinterpret_cast(ptr_), offset_); buffer_[0] += previous_; previous_ = buffer_[0]; } } } else { offset_ = unpack_bits(buffer_[0], entry_bits_, reinterpret_cast(ptr_), offset_); buffer_[0] += previous_; previous_ = buffer_[0]; } } return *this; } template auto wrapped_compact_theta_sketch_alloc::const_iterator::operator++(int) -> const_iterator { const_iterator tmp(*this); operator++(); return tmp; } template bool wrapped_compact_theta_sketch_alloc::const_iterator::operator!=(const const_iterator& other) const { if (entry_bits_ == 64) return ptr_ != other.ptr_; return index_ != other.index_; } template bool wrapped_compact_theta_sketch_alloc::const_iterator::operator==(const const_iterator& other) const { if (entry_bits_ == 64) return ptr_ == other.ptr_; return index_ == other.index_; } template auto wrapped_compact_theta_sketch_alloc::const_iterator::operator*() const -> reference { if (entry_bits_ == 64) return *reinterpret_cast(ptr_); return buffer_[buf_i_]; } template auto wrapped_compact_theta_sketch_alloc::const_iterator::operator->() const -> pointer { if (entry_bits_ == 64) return reinterpret_cast(ptr_); return buffer_ + buf_i_; } } /* namespace datasketches */ #endif