/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <algorithm>
#include <cassert>
#include <cstring>
#include <new>

using std::bad_alloc;
using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024; /* 10MiB */

Segment::Segment(uint32_t segment_id, uint64_t samplerate, unsigned int unit_size) :
	segment_id_(segment_id),
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false),
	is_complete_(false)
{
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);
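	// Hypothetical example: with unit_size_ == 6 (e.g. three 16-bit channels),
	// MaxChunkSize / 6 == 1747626 samples, so chunk_size_ becomes
	// 1747626 * 6 == 10485756 bytes - the largest multiple of the unit
	// size that still fits into MaxChunkSize.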

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_ + 7]; /* FIXME +7 is workaround for #1284 */
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

uint32_t Segment::segment_id() const
{
	return segment_id_;
}

void Segment::set_complete()
{
	is_complete_ = true;
}

bool Segment::is_complete() const
{
	return is_complete_;
}

void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	if (current_chunk_) {
		// No more data will come in, so re-create the last chunk accordingly
		uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_ + 7]; /* FIXME +7 is workaround for #1284 */
		memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

		delete[] current_chunk_;
		current_chunk_ = resized_chunk;

		data_chunks_.pop_back();
		data_chunks_.push_back(resized_chunk);
	}
}

void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_ + 7]; /* FIXME +7 is workaround for #1284 */
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = (uint8_t*)data;
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		uint64_t copy_count = 0;

		if (remaining_samples <= unused_samples_) {
			// All samples fit into the current chunk
			copy_count = remaining_samples;
		} else {
			// Only a part of the samples fit, fill up current chunk
			copy_count = unused_samples_;
		}

		const uint8_t* dest = &(current_chunk_[used_samples_ * unit_size_]);
		const uint8_t* src = &(data_byte_ptr[data_offset]);
		memcpy((void*)dest, (void*)src, (copy_count * unit_size_));

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += (copy_count * unit_size_);

		if (unused_samples_ == 0) {
			try {
				// If we're out of memory, allocating a chunk will throw
				// std::bad_alloc. To give the application some usable memory
				// to work with in case chunk allocation fails, we allocate
				// extra memory and throw it away if it all succeeded.
				// This way, memory allocation will fail early enough to let
				// PV remain alive. Otherwise, PV will crash in a random
				// memory-allocating part of the application.
				current_chunk_ = new uint8_t[chunk_size_ + 7]; /* FIXME +7 is workaround for #1284 */

				const int dummy_size = 2 * chunk_size_;
				auto dummy_chunk = new uint8_t[dummy_size];
				memset(dummy_chunk, 0xFF, dummy_size);
				delete[] dummy_chunk;
			} catch (bad_alloc&) {
				delete[] current_chunk_; // The new may have succeeded
				current_chunk_ = nullptr;
				break;
			}

			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}
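
// Hypothetical caller-side example (not part of this file): for a segment
// whose unit_size_ is sizeof(float), a capture routine could append a block
// of packed samples like this:
//
//   float buf[512];
//   /* ...fill buf with 512 samples... */
//   segment->append_samples(buf, 512);
//
// Note that the second argument counts samples, not bytes.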

const uint8_t* Segment::get_raw_sample(uint64_t sample_num) const
{
	assert(sample_num <= sample_count_);

	uint64_t chunk_num = (sample_num * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (sample_num * unit_size_) % chunk_size_;
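
	// Hypothetical example: with unit_size_ == 2 and chunk_size_ == 8,
	// sample_num == 5 gives chunk_num == (5 * 2) / 8 == 1 and
	// chunk_offs == (5 * 2) % 8 == 2, i.e. the sample starts at byte 2
	// of the second chunk.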

	lock_guard<recursive_mutex> lock(mutex_); // Because of free_unused_memory()

	const uint8_t* chunk = data_chunks_[chunk_num];

	return chunk + chunk_offs;
}

void Segment::get_raw_samples(uint64_t start, uint64_t count, uint8_t* dest) const
{
	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(dest != nullptr);

	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	lock_guard<recursive_mutex> lock(mutex_); // Because of free_unused_memory()

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = min(count * unit_size_,
			chunk_size_ - chunk_offs);
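
		// Hypothetical example: with unit_size_ == 2, chunk_size_ == 8,
		// start == 3 and count == 3, the first pass copies
		// min(6, 8 - 6) == 2 bytes (one sample) from the end of chunk 0;
		// the next pass copies the remaining two samples from the start
		// of chunk 1.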

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}
}
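
/*
 * Typical caller-side iteration sketch (not part of this file; "segment" and
 * the processing step are hypothetical):
 *
 *   SegmentDataIterator* it = segment->begin_sample_iteration(0);
 *   uint64_t remaining = segment->get_sample_count();
 *   while (remaining > 0) {
 *       uint64_t len = min(remaining, segment->get_iterator_valid_length(it));
 *       uint8_t* data = segment->get_iterator_value(it);
 *       // ...process len contiguous samples starting at data...
 *       remaining -= len;
 *       if (remaining > 0)
 *           segment->continue_sample_iteration(it, len);
 *   }
 *   segment->end_sample_iteration(it);
 */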

SegmentDataIterator* Segment::begin_sample_iteration(uint64_t start)
{
	SegmentDataIterator* it = new SegmentDataIterator;

	assert(start < sample_count_);

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];

	return it;
}

void Segment::continue_sample_iteration(SegmentDataIterator* it, uint64_t increase)
{
	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	if (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}
}

void Segment::end_sample_iteration(SegmentDataIterator* it)
{
	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

uint8_t* Segment::get_iterator_value(SegmentDataIterator* it)
{
	assert(it->sample_index <= (sample_count_ - 1));

	return (it->chunk + it->chunk_offs);
}

uint64_t Segment::get_iterator_valid_length(SegmentDataIterator* it)
{
	assert(it->sample_index <= (sample_count_ - 1));

	return ((chunk_size_ - it->chunk_offs) / unit_size_);
}

} // namespace data
} // namespace pv