/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

#include <QDebug>

using std::bad_alloc;
using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024;  /* 10MiB */

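// Constructs a segment that stores samples of 'unit_size' bytes each and
// pre-allocates the first data chunk. Illustrative construction (a sketch,
// not part of the original code; the values are hypothetical):
//
//   Segment segment(0, 1000000, 2);  // segment 0, 1 MHz, 2 bytes per sample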
Segment::Segment(uint32_t segment_id, uint64_t samplerate, unsigned int unit_size) :
        segment_id_(segment_id),
        sample_count_(0),
        start_time_(0),
        samplerate_(samplerate),
        unit_size_(unit_size),
        iterator_count_(0),
        mem_optimization_requested_(false),
        is_complete_(false)
{
        lock_guard<recursive_mutex> lock(mutex_);
        assert(unit_size_ > 0);

        // Determine the chunk size: the largest multiple of unit_size_
        // (i.e. a whole number of samples) that does not exceed MaxChunkSize
        chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);

        // Create the initial chunk
        current_chunk_ = new uint8_t[chunk_size_ + 7];  /* FIXME +7 is workaround for #1284 */
        data_chunks_.push_back(current_chunk_);
        used_samples_ = 0;
        unused_samples_ = chunk_size_ / unit_size_;
}

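// Frees all sample data chunks owned by this segment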
Segment::~Segment()
{
        lock_guard<recursive_mutex> lock(mutex_);

        for (uint8_t* chunk : data_chunks_)
                delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
        return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
        return start_time_;
}

double Segment::samplerate() const
{
        return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
        samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
        return unit_size_;
}

uint32_t Segment::segment_id() const
{
        return segment_id_;
}

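// Marks the segment as complete, i.e. no more samples will be added,
// and notifies interested parties by invoking completed()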
void Segment::set_complete()
{
        is_complete_ = true;

        completed();
}

bool Segment::is_complete() const
{
        return is_complete_;
}

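// Shrinks the last data chunk to the size actually used once no more data
// is expected. If iterators are active, the optimization is deferred until
// the last one is released (see end_sample_iteration())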
void Segment::free_unused_memory()
{
        lock_guard<recursive_mutex> lock(mutex_);

        // Do not mess with the data chunks if we have iterators pointing at them
        if (iterator_count_ > 0) {
                mem_optimization_requested_ = true;
                return;
        }

        if (current_chunk_) {
                // No more data will come in, so re-create the last chunk accordingly
                uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_ + 7];  /* FIXME +7 is workaround for #1284 */
                memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

                delete[] current_chunk_;
                current_chunk_ = resized_chunk;

                data_chunks_.pop_back();
                data_chunks_.push_back(resized_chunk);
        }
}

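// Appends a single sample of unit_size_ bytes, allocating a new chunk
// when the current one becomes full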
void Segment::append_single_sample(void *data)
{
        lock_guard<recursive_mutex> lock(mutex_);

        // There will always be space for at least one sample in
        // the current chunk, so we do not need to test for space

        memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
        used_samples_++;
        unused_samples_--;

        if (unused_samples_ == 0) {
                current_chunk_ = new uint8_t[chunk_size_ + 7];  /* FIXME +7 is workaround for #1284 */
                data_chunks_.push_back(current_chunk_);
                used_samples_ = 0;
                unused_samples_ = chunk_size_ / unit_size_;
        }

        sample_count_++;
}

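// Appends 'samples' samples of unit_size_ bytes each, filling up the current
// chunk first and allocating additional chunks as needed.
//
// Illustrative call (a sketch, not part of the original code; the buffer and
// its size are hypothetical and assume unit_size_ == 2):
//
//   uint16_t buf[512];
//   // ... fill buf with acquired samples ...
//   segment->append_samples(buf, 512);  // copies 512 * 2 bytes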
void Segment::append_samples(void* data, uint64_t samples)
{
        lock_guard<recursive_mutex> lock(mutex_);

        const uint8_t* data_byte_ptr = (uint8_t*)data;
        uint64_t remaining_samples = samples;
        uint64_t data_offset = 0;

        do {
                uint64_t copy_count = 0;

                if (remaining_samples <= unused_samples_) {
                        // All samples fit into the current chunk
                        copy_count = remaining_samples;
                } else {
                        // Only a part of the samples fit, fill up current chunk
                        copy_count = unused_samples_;
                }

                uint8_t* dest = &(current_chunk_[used_samples_ * unit_size_]);
                const uint8_t* src = &(data_byte_ptr[data_offset]);
                memcpy(dest, src, copy_count * unit_size_);

                used_samples_ += copy_count;
                unused_samples_ -= copy_count;
                remaining_samples -= copy_count;
                data_offset += (copy_count * unit_size_);

                if (unused_samples_ == 0) {
                        try {
                                // If we're out of memory, allocating a chunk will throw
                                // std::bad_alloc. To give the application some usable memory
                                // to work with in case chunk allocation fails, we allocate
                                // extra memory here and throw it away if everything succeeded.
                                // This way, memory allocation fails early enough to let
                                // PV remain alive. Otherwise, PV would crash in a random
                                // memory-allocating part of the application.
                                current_chunk_ = new uint8_t[chunk_size_ + 7];  /* FIXME +7 is workaround for #1284 */

                                const uint64_t dummy_size = 2 * chunk_size_;
                                auto dummy_chunk = new uint8_t[dummy_size];
                                memset(dummy_chunk, 0xFF, dummy_size);
                                delete[] dummy_chunk;
                        } catch (bad_alloc&) {
                                delete[] current_chunk_;  // The new may have succeeded
                                current_chunk_ = nullptr;
                                throw;
                        }

                        data_chunks_.push_back(current_chunk_);
                        used_samples_ = 0;
                        unused_samples_ = chunk_size_ / unit_size_;
                }
        } while (remaining_samples > 0);

        sample_count_ += samples;
}

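// Returns a pointer to the raw bytes of the sample with the given index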
const uint8_t* Segment::get_raw_sample(uint64_t sample_num) const
{
        assert(sample_num <= sample_count_);

        uint64_t chunk_num = (sample_num * unit_size_) / chunk_size_;
        uint64_t chunk_offs = (sample_num * unit_size_) % chunk_size_;

        lock_guard<recursive_mutex> lock(mutex_);  // Because of free_unused_memory()

        const uint8_t* chunk = data_chunks_[chunk_num];

        return chunk + chunk_offs;
}

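// Copies 'count' samples starting at index 'start' into 'dest', crossing
// chunk boundaries as needed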
void Segment::get_raw_samples(uint64_t start, uint64_t count, uint8_t* dest) const
{
        assert(start < sample_count_);
        assert(start + count <= sample_count_);
        assert(count > 0);
        assert(dest != nullptr);

        uint8_t* dest_ptr = dest;

        uint64_t chunk_num = (start * unit_size_) / chunk_size_;
        uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

        lock_guard<recursive_mutex> lock(mutex_);  // Because of free_unused_memory()

        while (count > 0) {
                const uint8_t* chunk = data_chunks_[chunk_num];

                uint64_t copy_size = min(count * unit_size_,
                        chunk_size_ - chunk_offs);

                memcpy(dest_ptr, chunk + chunk_offs, copy_size);

                dest_ptr += copy_size;
                count -= (copy_size / unit_size_);

                chunk_num++;
                chunk_offs = 0;
        }
}

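// Starts a sample iteration at sample index 'start' and returns an iterator
// handle. While iterators are active, free_unused_memory() is deferred.
//
// Illustrative iteration loop (a sketch, not part of the original code):
//
//   SegmentDataIterator* it = segment->begin_sample_iteration(0);
//   uint64_t remaining = segment->get_sample_count();
//   while (remaining > 0) {
//           uint64_t len = std::min(segment->get_iterator_valid_length(it), remaining);
//           const uint8_t* data = segment->get_iterator_value(it);
//           // ... process 'len' samples of unit_size() bytes each at 'data' ...
//           segment->continue_sample_iteration(it, len);
//           remaining -= len;
//   }
//   segment->end_sample_iteration(it);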
SegmentDataIterator* Segment::begin_sample_iteration(uint64_t start)
{
        SegmentDataIterator* it = new SegmentDataIterator;

        assert(start < sample_count_);

        iterator_count_++;

        it->sample_index = start;
        it->chunk_num = (start * unit_size_) / chunk_size_;
        it->chunk_offs = (start * unit_size_) % chunk_size_;
        it->chunk = data_chunks_[it->chunk_num];

        return it;
}

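// Advances the iterator by 'increase' samples, switching to the next chunk
// when the end of the current chunk is reached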
void Segment::continue_sample_iteration(SegmentDataIterator* it, uint64_t increase)
{
        it->sample_index += increase;
        it->chunk_offs += (increase * unit_size_);

        if (it->chunk_offs > (chunk_size_ - 1)) {
                it->chunk_num++;
                it->chunk_offs -= chunk_size_;
                it->chunk = data_chunks_[it->chunk_num];
        }
}

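// Releases the iterator. When the last active iterator is released and a
// memory optimization was requested in the meantime, it is performed now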
void Segment::end_sample_iteration(SegmentDataIterator* it)
{
        delete it;

        iterator_count_--;

        if ((iterator_count_ == 0) && mem_optimization_requested_) {
                mem_optimization_requested_ = false;
                free_unused_memory();
        }
}

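// Returns a pointer to the sample data at the iterator's current position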
uint8_t* Segment::get_iterator_value(SegmentDataIterator* it)
{
        assert(it->sample_index <= (sample_count_ - 1));

        return (it->chunk + it->chunk_offs);
}

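// Returns the number of samples that can be read contiguously from the
// iterator's current position, i.e. until the end of the current chunk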
uint64_t Segment::get_iterator_valid_length(SegmentDataIterator* it)
{
        assert(it->sample_index <= (sample_count_ - 1));

        return ((chunk_size_ - it->chunk_offs) / unit_size_);
}

} // namespace data
} // namespace pv