Random simplifications, cosmetics/whitespace/consistency fixes.
diff --git a/pv/data/segment.cpp b/pv/data/segment.cpp
index 483d97b13304b337801066ad84a0ae5dcdb57e62..88015c3639860597accf8bf23eb21ed98d499e53 100644
--- a/pv/data/segment.cpp
+++ b/pv/data/segment.cpp
 
 #include "segment.hpp"
 
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <vector>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
 
 using std::lock_guard;
+using std::min;
 using std::recursive_mutex;
-using std::vector;
 
 namespace pv {
 namespace data {
 
+const uint64_t Segment::MaxChunkSize = 10*1024*1024;  /* 10MiB */
+
 Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
        sample_count_(0),
        start_time_(0),
        samplerate_(samplerate),
-       unit_size_(unit_size)
+       unit_size_(unit_size),
+       iterator_count_(0),
+       mem_optimization_requested_(false)
 {
        lock_guard<recursive_mutex> lock(mutex_);
        assert(unit_size_ > 0);
 
        // Determine the number of samples we can fit in one chunk
        // without exceeding MaxChunkSize
-       chunk_size_ = std::min(MaxChunkSize,
-               (MaxChunkSize / unit_size_) * unit_size_);
+       chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);
 
        // Create the initial chunk
        current_chunk_ = new uint8_t[chunk_size_];
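
The sizing rule above rounds the chunk budget down to a whole number of samples, so a single sample never straddles two chunks. A minimal standalone sketch of the arithmetic (the helper name is illustrative, not part of this patch):

#include <algorithm>
#include <cstdint>

// Example: unit_size = 3 bytes, budget = 10 MiB (10485760 bytes).
// 10485760 / 3 = 3495253 samples; 3495253 * 3 = 10485759 bytes, so the last
// byte of the budget is left unused instead of splitting a sample.
static uint64_t chunk_size_for(uint64_t max_chunk_size, unsigned int unit_size)
{
	return std::min(max_chunk_size,
		(max_chunk_size / unit_size) * unit_size);
}
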
@@ -92,6 +93,12 @@ void Segment::free_unused_memory()
 {
        lock_guard<recursive_mutex> lock(mutex_);
 
+       // Do not mess with the data chunks if we have iterators pointing at them
+       if (iterator_count_ > 0) {
+               mem_optimization_requested_ = true;
+               return;
+       }
+
        // No more data will come in, so re-create the last chunk accordingly
        uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
        memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);
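
Re-allocating the last chunk would leave any live SegmentRawDataIterator with dangling chunk/value pointers, which is why the new guard defers the optimization. The shrink step itself, sketched as a free-standing helper (names are illustrative, not the member variables):

#include <cstdint>
#include <cstring>

// Replace a chunk with a copy holding exactly the bytes in use; the old
// buffer is freed, so no outside pointer may still reference it.
static uint8_t* shrink_chunk(uint8_t* chunk, uint64_t used_samples,
	unsigned int unit_size)
{
	uint8_t* resized = new uint8_t[used_samples * unit_size];
	memcpy(resized, chunk, used_samples * unit_size);
	delete[] chunk;
	return resized;
}
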
@@ -110,8 +117,7 @@ void Segment::append_single_sample(void *data)
        // There will always be space for at least one sample in
        // the current chunk, so we do not need to test for space
 
-       memcpy(current_chunk_ + (used_samples_ * unit_size_),
-               data, unit_size_);
+       memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
        used_samples_++;
        unused_samples_--;
 
@@ -179,7 +185,7 @@ uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
        while (count > 0) {
                const uint8_t* chunk = data_chunks_[chunk_num];
 
-               uint64_t copy_size = std::min(count * unit_size_,
+               uint64_t copy_size = min(count * unit_size_,
                        chunk_size_ - chunk_offs);
 
                memcpy(dest_ptr, chunk + chunk_offs, copy_size);
@@ -194,12 +200,14 @@ uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
        return dest;
 }
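
Reading a raw sample range can cross several chunk boundaries; the loop above copies chunk by chunk. A compact sketch of the same walk as an assumed standalone helper (not the member function):

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// Copy `count` samples starting at sample index `start` into `dest`. Each
// pass copies either the remainder of the request or the remainder of the
// current chunk, whichever is smaller, then moves on to the next chunk.
static void copy_samples(const std::vector<uint8_t*>& chunks,
	uint64_t chunk_size, unsigned int unit_size,
	uint64_t start, uint64_t count, uint8_t* dest)
{
	uint64_t chunk_num = (start * unit_size) / chunk_size;
	uint64_t chunk_offs = (start * unit_size) % chunk_size;
	uint64_t bytes_left = count * unit_size;

	while (bytes_left > 0) {
		const uint64_t copy_size =
			std::min(bytes_left, chunk_size - chunk_offs);
		memcpy(dest, chunks[chunk_num] + chunk_offs, copy_size);
		dest += copy_size;
		bytes_left -= copy_size;
		chunk_num++;
		chunk_offs = 0;
	}
}
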
 
-SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
+SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
 {
        SegmentRawDataIterator* it = new SegmentRawDataIterator;
 
        assert(start < sample_count_);
 
+       iterator_count_++;
+
        it->sample_index = start;
        it->chunk_num = (start * unit_size_) / chunk_size_;
        it->chunk_offs = (start * unit_size_) % chunk_size_;
@@ -209,18 +217,16 @@ SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
        return it;
 }
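
Callers are expected to bracket their walk with begin/end so the new iterator count stays balanced. A usage sketch, assuming one-sample steps and a made-up consumer (the declarations from segment.hpp are assumed to be visible):

// Every begin_raw_sample_iteration() needs a matching
// end_raw_sample_iteration(); otherwise a deferred free_unused_memory()
// request is never serviced.
void walk_segment(pv::data::Segment& segment, uint64_t sample_count)
{
	pv::data::SegmentRawDataIterator* it =
		segment.begin_raw_sample_iteration(0);

	for (uint64_t i = 0; i < sample_count; i++) {
		// it->value points at the first byte of the current sample and
		// stays valid until the next continue/end call.
		// ... consume the sample here ...
		segment.continue_raw_sample_iteration(it, 1);
	}

	segment.end_raw_sample_iteration(it);
}
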
 
-void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase) const
+void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
 {
        lock_guard<recursive_mutex> lock(mutex_);
 
+       // Fail gracefully if we are asked to deliver data we don't have
        if (it->sample_index > sample_count_)
-       {
-               // Fail gracefully if we are asked to deliver data we don't have
                return;
-       } else {
-               it->sample_index += increase;
-               it->chunk_offs += (increase * unit_size_);
-       }
+
+       it->sample_index += increase;
+       it->chunk_offs += (increase * unit_size_);
 
        if (it->chunk_offs > (chunk_size_ - 1)) {
                it->chunk_num++;
@@ -231,11 +237,17 @@ void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
        it->value = it->chunk + it->chunk_offs;
 }
 
-void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it) const
+void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
 {
        delete it;
-}
 
+       iterator_count_--;
+
+       if ((iterator_count_ == 0) && mem_optimization_requested_) {
+               mem_optimization_requested_ = false;
+               free_unused_memory();
+       }
+}
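
Because of the deferral, it is the last end_raw_sample_iteration() call that actually performs the shrink. A short sequence sketch, assuming `segment` has finished acquiring data:

// free_unused_memory() issued while an iterator is open only sets the flag;
// the real work runs once iterator_count_ drops back to zero.
pv::data::SegmentRawDataIterator* it = segment.begin_raw_sample_iteration(0);
segment.free_unused_memory();           // deferred: an iterator is open
// ... iterate over the data ...
segment.end_raw_sample_iteration(it);   // count reaches 0, shrink runs now
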
 
 } // namespace data
 } // namespace pv