/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024; /* 10MiB */
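
// Note: Sample data is stored in a list of equally sized chunks rather than
// in one contiguous buffer, so appending samples never reallocates or moves
// data that is already stored; a full chunk simply gets a successor.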

Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false),
	is_complete_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);
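	// (MaxChunkSize / unit_size_) * unit_size_ rounds MaxChunkSize down to
	// a whole multiple of the unit size. Illustrative numbers: with
	// unit_size_ = 3, chunk_size_ = (10485760 / 3) * 3 = 10485759 bytes,
	// i.e. 3495253 complete samples per chunk; the min() never picks a
	// larger value, since the rounded result cannot exceed MaxChunkSize.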

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

void Segment::set_complete()
{
	is_complete_ = true;
}

bool Segment::is_complete() const
{
	return is_complete_;
}

void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	// No more data will come in, so re-create the last chunk accordingly
	uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
	memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

	delete[] current_chunk_;
	current_chunk_ = resized_chunk;

	data_chunks_.pop_back();
	data_chunks_.push_back(resized_chunk);
}

void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There is always space for at least one sample in the current chunk:
	// whenever a chunk fills up, a fresh one is allocated right away (see
	// below), so no free-space check is needed here.

	memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = static_cast<const uint8_t*>(data);
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		uint64_t copy_count = 0;

		if (remaining_samples <= unused_samples_) {
			// All samples fit into the current chunk
			copy_count = remaining_samples;
		} else {
			// Only a part of the samples fit, fill up current chunk
			copy_count = unused_samples_;
		}

		uint8_t* dest = current_chunk_ + (used_samples_ * unit_size_);
		const uint8_t* src = data_byte_ptr + data_offset;
		memcpy(dest, src, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += (copy_count * unit_size_);

		if (unused_samples_ == 0) {
			// If we're out of memory, this will throw std::bad_alloc
			current_chunk_ = new uint8_t[chunk_size_];
			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}
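
// Illustrative usage sketch (hypothetical caller, not part of this file):
// feeding 16-bit samples into a Segment built with unit_size = 2.
//
//   uint16_t buf[512];
//   // ... fill buf with captured samples ...
//   segment.append_samples(buf, 512);  // 512 samples = 1024 bytes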

void Segment::get_raw_samples(uint64_t start, uint64_t count,
	uint8_t* dest) const
{
	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);
	assert(dest != nullptr);

	lock_guard<recursive_mutex> lock(mutex_);

	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}
}
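
// Worked example of the chunk addressing above (illustrative numbers):
// with unit_size_ = 2, chunk_size_ = 10485760 and start = 6000000, the byte
// position is 12000000, so chunk_num = 12000000 / 10485760 = 1 and
// chunk_offs = 12000000 % 10485760 = 1514240 bytes into chunk 1.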

SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	assert(start < sample_count_);

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}

void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index >= sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	// A large increase may carry the offset past more than one chunk boundary
	while (it->chunk_offs >= chunk_size_) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}
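
// Illustrative use of the raw iteration API above, assuming a populated
// Segment instance named "segment" and a placeholder process() function
// (both hypothetical):
//
//   SegmentRawDataIterator* it = segment.begin_raw_sample_iteration(0);
//   for (uint64_t i = 0; i < segment.get_sample_count(); i++) {
//       process(it->value);
//       segment.continue_raw_sample_iteration(it, 1);
//   }
//   segment.end_raw_sample_iteration(it);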

} // namespace data
} // namespace pv