/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

#include <QDebug>

using std::bad_alloc;
using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024; /* 10MiB */

Segment::Segment(uint32_t segment_id, uint64_t samplerate, unsigned int unit_size) :
	segment_id_(segment_id),
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false),
	is_complete_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

uint32_t Segment::segment_id() const
{
	return segment_id_;
}

void Segment::set_complete()
{
	is_complete_ = true;
}

bool Segment::is_complete() const
{
	return is_complete_;
}

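// Shrink the last, partially filled chunk down to the samples it actually
// holds. If iterators are currently active, the optimization is deferred
// until end_sample_iteration() releases the last one.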
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	if (current_chunk_) {
		// No more data will come in, so re-create the last chunk accordingly
		uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
		memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

		delete[] current_chunk_;
		current_chunk_ = resized_chunk;

		data_chunks_.pop_back();
		data_chunks_.push_back(resized_chunk);
	}
}

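// Append a single sample of unit_size_ bytes. The current chunk always has
// room for at least one more sample; once it becomes full, a fresh chunk is
// allocated for subsequent samples.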
void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

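// Append 'samples' consecutive samples of unit_size_ bytes each, splitting
// the data across chunk boundaries and allocating new chunks as needed.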
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = static_cast<const uint8_t*>(data);
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		uint64_t copy_count = 0;

		if (remaining_samples <= unused_samples_) {
			// All remaining samples fit into the current chunk
			copy_count = remaining_samples;
		} else {
			// Only some of the samples fit, so fill up the current chunk
			copy_count = unused_samples_;
		}

		uint8_t* dest = &(current_chunk_[used_samples_ * unit_size_]);
		const uint8_t* src = &(data_byte_ptr[data_offset]);
		memcpy(dest, src, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += (copy_count * unit_size_);

		if (unused_samples_ == 0) {
			// If we're out of memory, allocating a chunk will throw
			// std::bad_alloc. To leave the application some usable memory
			// to work with in case chunk allocation fails, we additionally
			// allocate (and immediately discard) a larger dummy block.
			// This way, allocation fails early enough for PV to stay alive
			// instead of crashing later in some unrelated memory-allocating
			// part of the application.
			uint8_t* new_chunk = nullptr;
			try {
				new_chunk = new uint8_t[chunk_size_];

				const uint64_t dummy_size = 2 * chunk_size_;
				auto dummy_chunk = new uint8_t[dummy_size];
				memset(dummy_chunk, 0xFF, dummy_size);
				delete[] dummy_chunk;
			} catch (bad_alloc&) {
				delete[] new_chunk; // No-op if the chunk allocation itself failed
				current_chunk_ = nullptr;
				throw;
			}

			current_chunk_ = new_chunk;
			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}

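// Copy 'count' samples, starting at sample index 'start', into 'dest',
// assembling them from as many chunks as the range spans.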
void Segment::get_raw_samples(uint64_t start, uint64_t count,
	uint8_t* dest) const
{
	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);
	assert(dest != nullptr);

	lock_guard<recursive_mutex> lock(mutex_);

	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}
}

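// Create an iterator positioned at sample index 'start'. While any iterator
// is active, chunk memory is not reorganized; callers must release iterators
// with end_sample_iteration().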
SegmentDataIterator* Segment::begin_sample_iteration(uint64_t start)
{
	SegmentDataIterator* it = new SegmentDataIterator;

	assert(start < sample_count_);

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];

	return it;
}

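// Advance the iterator by 'increase' samples, switching to the following
// chunk(s) when the current one is exhausted.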
void Segment::continue_sample_iteration(SegmentDataIterator* it, uint64_t increase)
{
	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	// Advance across chunk boundaries until the offset is within the current chunk
	while (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}
}

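// Release an iterator. If a memory optimization was requested while
// iterators were active, it is carried out once the last one is released.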
void Segment::end_sample_iteration(SegmentDataIterator* it)
{
	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

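// Return a pointer to the raw bytes of the sample the iterator points at.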
uint8_t* Segment::get_iterator_value(SegmentDataIterator* it)
{
	assert(it->sample_index <= (sample_count_ - 1));

	return (it->chunk + it->chunk_offs);
}

} // namespace data
} // namespace pv