/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

21#include "segment.hpp"
22
23#include <cassert>
24#include <cstdlib>
25#include <cstring>
26
27using std::bad_alloc;
28using std::lock_guard;
29using std::min;
30using std::recursive_mutex;
31
namespace pv {
namespace data {

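// Sample data is kept in a list of fixed-size chunks rather than in one
// contiguous buffer, so appending samples never requires reallocating and
// copying what has already been captured. Each chunk holds a whole number of
// samples; MaxChunkSize is the upper bound on the size of a single chunk.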
const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024; /* 10MiB */

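// 'unit_size' is the number of bytes occupied by a single sample.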
Segment::Segment(uint32_t segment_id, uint64_t samplerate, unsigned int unit_size) :
	segment_id_(segment_id),
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false),
	is_complete_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
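	// (e.g. with a unit size of 3 bytes this yields 10485759 bytes, i.e. 3495253 samples per chunk)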
	chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

uint32_t Segment::segment_id() const
{
	return segment_id_;
}

void Segment::set_complete()
{
	is_complete_ = true;
}

bool Segment::is_complete() const
{
	return is_complete_;
}

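// Shrinks the last data chunk down to the bytes actually in use. While raw
// sample iterators exist, the optimization is deferred (see
// end_raw_sample_iteration()), as resizing the chunk would invalidate the
// pointers held by the iterators.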
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	if (current_chunk_) {
		// No more data will come in, so re-create the last chunk accordingly
		uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
		memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

		delete[] current_chunk_;
		current_chunk_ = resized_chunk;

		data_chunks_.pop_back();
		data_chunks_.push_back(resized_chunk);
	}
}

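// Appends a single sample; 'data' must point to at least unit_size_ bytes.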
void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space
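	// (a fresh chunk is allocated as soon as the current one fills up, see below)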

	memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

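// Appends 'samples' consecutive samples from 'data', spilling over into newly
// allocated chunks as needed.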
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = static_cast<const uint8_t*>(data);
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		uint64_t copy_count = 0;

		if (remaining_samples <= unused_samples_) {
			// All samples fit into the current chunk
			copy_count = remaining_samples;
		} else {
			// Only a part of the samples fit, fill up current chunk
			copy_count = unused_samples_;
		}

		uint8_t* dest = current_chunk_ + (used_samples_ * unit_size_);
		const uint8_t* src = data_byte_ptr + data_offset;
		memcpy(dest, src, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += (copy_count * unit_size_);

		if (unused_samples_ == 0) {
			// The chunk we just filled is already owned by data_chunks_,
			// so drop our alias to it before allocating its successor;
			// otherwise the catch handler below would delete a chunk that
			// the destructor frees as well.
			current_chunk_ = nullptr;

			try {
				// If we're out of memory, allocating a chunk will throw
				// std::bad_alloc. To give the application some usable memory
				// to work with in case chunk allocation fails, we allocate
				// extra memory and throw it away if it all succeeded.
				// This way, memory allocation will fail early enough to let
				// PV remain alive. Otherwise, PV will crash in a random
				// memory-allocating part of the application.
				current_chunk_ = new uint8_t[chunk_size_];

				const uint64_t dummy_size = 2 * chunk_size_;
				auto dummy_chunk = new uint8_t[dummy_size];
				memset(dummy_chunk, 0xFF, dummy_size);
				delete[] dummy_chunk;
			} catch (const bad_alloc&) {
				delete[] current_chunk_; // The chunk allocation may have succeeded
				current_chunk_ = nullptr;
				throw;
			}

			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}

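// Copies 'count' samples, beginning at sample index 'start', into 'dest'.
// The caller must provide at least count * unit_size_ bytes of space.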
void Segment::get_raw_samples(uint64_t start, uint64_t count,
	uint8_t* dest) const
{
	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);
	assert(dest != nullptr);

	lock_guard<recursive_mutex> lock(mutex_);

	uint8_t* dest_ptr = dest;

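	// Map the starting sample index to its chunk and the byte offset within
	// that chunk. This works because chunk_size_ is a multiple of unit_size_,
	// so a sample never straddles two chunks.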
	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}
}

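// Note: every call must be paired with a call to end_raw_sample_iteration(),
// since free_unused_memory() is deferred while iterators exist so that the
// chunk pointers they hold stay valid. A hypothetical caller (all names below
// are placeholders, not part of this API) might look like:
//
//   SegmentRawDataIterator* it = segment->begin_raw_sample_iteration(0);
//   for (uint64_t i = 0; i < segment->get_sample_count(); i++) {
//       process_sample(it->value);  // caller-supplied per-sample logic
//       segment->continue_raw_sample_iteration(it, 1);
//   }
//   segment->end_raw_sample_iteration(it);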
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	assert(start < sample_count_);

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}

void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index >= sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

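	// Note: this advances across at most one chunk boundary per call, so
	// 'increase' must not exceed the number of samples in a chunk.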
	if (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
	delete it;

	iterator_count_--;

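	// Perform the memory optimization that was deferred while iterators existed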
	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

} // namespace data
} // namespace pv