/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024; /* 10 MiB */

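// Sample data is held as a list of equally sized chunks (data_chunks_),
// each storing a whole number of samples of unit_size_ bytes. New data is
// appended to current_chunk_, the last chunk in the list; used_samples_
// and unused_samples_ track how full that chunk is.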
Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);
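	// e.g. for unit_size_ == 3: (10485760 / 3) * 3 = 10485759 bytes,
	// i.e. 3495253 whole samples per chunk, so samples never straddle
	// chunk boundaries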

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

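// Shrink the last chunk to the size actually used. Only meant to be called
// once no more samples will be appended; while raw-data iterators are
// active, the operation is deferred via mem_optimization_requested_ and
// performed by end_raw_sample_iteration() instead.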
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	// No more data will come in, so re-create the last chunk accordingly
	uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
	memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

	delete[] current_chunk_;
	current_chunk_ = resized_chunk;

	data_chunks_.pop_back();
	data_chunks_.push_back(resized_chunk);
}

void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
	used_samples_++;
	unused_samples_--;

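	// Allocate the next chunk as soon as the current one fills up, so the
	// "always space for at least one sample" invariant above still holds
	// on the next call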
	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

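// Example (hypothetical numbers): with a chunk capacity of 100 samples and
// an empty current chunk, appending 250 samples copies 100 samples into the
// current chunk, then 100 and 50 into two newly allocated chunks. If an
// append exactly fills the current chunk, a fresh chunk is allocated right
// away, preserving the invariant relied on by append_single_sample().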
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = (const uint8_t*)data;
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		uint64_t copy_count = 0;

		if (remaining_samples <= unused_samples_) {
			// All samples fit into the current chunk
			copy_count = remaining_samples;
		} else {
			// Only a part of the samples fit, fill up current chunk
			copy_count = unused_samples_;
		}

		uint8_t* dest = current_chunk_ + (used_samples_ * unit_size_);
		const uint8_t* src = data_byte_ptr + data_offset;
		memcpy(dest, src, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += (copy_count * unit_size_);

		if (unused_samples_ == 0) {
			// If we're out of memory, this will throw std::bad_alloc
			current_chunk_ = new uint8_t[chunk_size_];
			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}

void Segment::get_raw_samples(uint64_t start, uint64_t count,
	uint8_t* dest) const
{
	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);
	assert(dest != nullptr);

	lock_guard<recursive_mutex> lock(mutex_);

	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}
}

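// A sketch of the intended iteration pattern (process() is a placeholder):
//
//   SegmentRawDataIterator* it = segment->begin_raw_sample_iteration(0);
//   for (uint64_t i = 0; i < segment->get_sample_count(); i++) {
//       process(it->value);
//       segment->continue_raw_sample_iteration(it, 1);
//   }
//   segment->end_raw_sample_iteration(it);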
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
	assert(start < sample_count_);

	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}

void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index > sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	// A large increase may span more than one chunk, so keep advancing
	// until the offset falls within the current chunk
	while (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

} // namespace data
} // namespace pv