sigrok.org Git - pulseview.git/blobdiff - pv/data/segment.cpp
Segment: Do not alter chunks when there are active iterators
[pulseview.git] / pv / data / segment.cpp
index f635fc387a59dc4156273ca0dd4d88602ca35a27..64d9cd66081a414aa1dc5460e64ec4cdc94d868e 100644 (file)
@@ -33,11 +33,15 @@ using std::vector;
 namespace pv {
 namespace data {
 
+const uint64_t Segment::MaxChunkSize = 10*1024*1024;  /* 10MiB */
+
 Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
        sample_count_(0),
        start_time_(0),
        samplerate_(samplerate),
-       unit_size_(unit_size)
+       unit_size_(unit_size),
+       iterator_count_(0),
+       mem_optimization_requested_(false)
 {
        lock_guard<recursive_mutex> lock(mutex_);
        assert(unit_size_ > 0);
@@ -88,6 +92,27 @@ unsigned int Segment::unit_size() const
        return unit_size_;
 }
 
+void Segment::free_unused_memory()
+{
+       lock_guard<recursive_mutex> lock(mutex_);
+
+       // Do not mess with the data chunks if we have iterators pointing at them
+       if (iterator_count_ > 0) {
+               mem_optimization_requested_ = true;
+               return;
+       }
+
+       // No more data will come in, so re-create the last chunk accordingly
+       uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
+       memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);
+
+       delete[] current_chunk_;
+       current_chunk_ = resized_chunk;
+
+       data_chunks_.pop_back();
+       data_chunks_.push_back(resized_chunk);
+}
+
 void Segment::append_single_sample(void *data)
 {
        lock_guard<recursive_mutex> lock(mutex_);
@@ -179,12 +204,14 @@ uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
        return dest;
 }
 
-SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
+SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
 {
        SegmentRawDataIterator* it = new SegmentRawDataIterator;
 
        assert(start < sample_count_);
 
+       iterator_count_++;
+
        it->sample_index = start;
        it->chunk_num = (start * unit_size_) / chunk_size_;
        it->chunk_offs = (start * unit_size_) % chunk_size_;
@@ -194,7 +221,7 @@ SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) cons
        return it;
 }
 
-void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase) const
+void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
 {
        lock_guard<recursive_mutex> lock(mutex_);
 
@@ -216,9 +243,16 @@ void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t
        it->value = it->chunk + it->chunk_offs;
 }
 
-void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it) const
+void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
 {
        delete it;
+
+       iterator_count_--;
+
+       if ((iterator_count_ == 0) && mem_optimization_requested_) {
+               mem_optimization_requested_ = false;
+               free_unused_memory();
+       }
 }