pv/data/segment.cpp

/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024;  /* 10MiB */

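// A Segment stores its samples in a list of chunks of at most MaxChunkSize
// bytes each. The chunk size is a whole multiple of unit_size_, so a single
// sample never straddles a chunk boundary and every chunk holds the same
// number of samples. Illustrative use (not part of this file): a segment
// for 16-bit samples at 1 MHz could be created as Segment(0, 1000000, 2)
// and filled via append_samples() or append_single_sample().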
Segment::Segment(uint32_t segment_id, uint64_t samplerate, unsigned int unit_size) :
	segment_id_(segment_id),
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size),
	iterator_count_(0),
	mem_optimization_requested_(false),
	is_complete_(false)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the chunk size in bytes: the largest multiple of
	// unit_size_ that does not exceed MaxChunkSize, so that every
	// chunk holds a whole number of samples
	chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

uint32_t Segment::segment_id() const
{
	return segment_id_;
}

void Segment::set_complete()
{
	is_complete_ = true;
}

bool Segment::is_complete() const
{
	return is_complete_;
}

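// Shrinks the current (last) chunk so that it holds exactly the samples
// written so far. Intended to be called once no more data will come in;
// if iterators are still active, the optimization is deferred until
// end_raw_sample_iteration() releases the last one.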
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Do not mess with the data chunks if we have iterators pointing at them
	if (iterator_count_ > 0) {
		mem_optimization_requested_ = true;
		return;
	}

	// No more data will come in, so re-create the last chunk accordingly
	uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
	memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

	delete[] current_chunk_;
	current_chunk_ = resized_chunk;

	data_chunks_.pop_back();
	data_chunks_.push_back(resized_chunk);
}

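// Appends a single sample of unit_size_ bytes. The allocation strategy
// below guarantees that the current chunk always has room for at least
// one more sample: a fresh chunk is allocated as soon as the previous
// one becomes full.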
void Segment::append_single_sample(void *data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

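// Appends 'samples' samples from 'data' in one call, copying chunk-sized
// pieces and allocating new chunks as the current one fills up. Unlike
// append_single_sample(), a single call may span several chunks.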
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = (uint8_t*)data;
	uint64_t remaining_samples = samples;
	uint64_t data_offset = 0;

	do {
		uint64_t copy_count = 0;

		if (remaining_samples <= unused_samples_) {
			// All remaining samples fit into the current chunk
			copy_count = remaining_samples;
		} else {
			// Only some of the samples fit; fill up the current chunk
			copy_count = unused_samples_;
		}

		uint8_t* dest = &(current_chunk_[used_samples_ * unit_size_]);
		const uint8_t* src = &(data_byte_ptr[data_offset]);
		memcpy(dest, src, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		remaining_samples -= copy_count;
		data_offset += (copy_count * unit_size_);

		if (unused_samples_ == 0) {
			// If we're out of memory, this will throw std::bad_alloc
			current_chunk_ = new uint8_t[chunk_size_];
			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}

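// Copies 'count' samples, starting at sample index 'start', into the
// caller-provided buffer 'dest', which must be able to hold at least
// count * unit_size_ bytes. The copy walks the chunk list so that reads
// spanning chunk boundaries are handled transparently.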
void Segment::get_raw_samples(uint64_t start, uint64_t count,
	uint8_t* dest) const
{
	// Acquire the lock first; the asserts below read sample_count_
	lock_guard<recursive_mutex> lock(mutex_);

	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);
	assert(dest != nullptr);

	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}
}

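// Raw-sample iteration protocol: begin_raw_sample_iteration() returns a
// heap-allocated iterator positioned at sample 'start',
// continue_raw_sample_iteration() advances it, and
// end_raw_sample_iteration() must be called exactly once per iterator to
// release it. While any iterator is active, the chunk layout is kept
// stable and free_unused_memory() is deferred.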
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	assert(start < sample_count_);

	iterator_count_++;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}

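// Advances the iterator by 'increase' samples, moving on to the following
// chunk(s) whenever the offset runs past the end of the current one.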
void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index > sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	// Advance across as many chunk boundaries as the increase requires
	while (it->chunk_offs >= chunk_size_) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}

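// Destroys an iterator obtained from begin_raw_sample_iteration() and, if
// this was the last active iterator, performs any memory optimization that
// was requested while iteration was in progress.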
void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
	delete it;

	iterator_count_--;

	if ((iterator_count_ == 0) && mem_optimization_requested_) {
		mem_optimization_requested_ = false;
		free_unused_memory();
	}
}

} // namespace data
} // namespace pv