/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

using std::lock_guard;
using std::min;
using std::recursive_mutex;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10*1024*1024;  /* 10MiB */

Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
        sample_count_(0),
        start_time_(0),
        samplerate_(samplerate),
        unit_size_(unit_size),
        iterator_count_(0),
        mem_optimization_requested_(false)
{
        lock_guard<recursive_mutex> lock(mutex_);
        assert(unit_size_ > 0);

        // Determine the number of samples we can fit in one chunk
        // without exceeding MaxChunkSize
        chunk_size_ = min(MaxChunkSize, (MaxChunkSize / unit_size_) * unit_size_);
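        // Example: with unit_size_ == 3, chunk_size_ becomes
        // (10485760 / 3) * 3 == 10485759 bytes, the largest multiple of
        // the unit size that fits, so no sample ever straddles two chunks.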

        // Create the initial chunk
        current_chunk_ = new uint8_t[chunk_size_];
        data_chunks_.push_back(current_chunk_);
        used_samples_ = 0;
        unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
        lock_guard<recursive_mutex> lock(mutex_);

        for (uint8_t* chunk : data_chunks_)
                delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
        lock_guard<recursive_mutex> lock(mutex_);
        return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
        return start_time_;
}

double Segment::samplerate() const
{
        return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
        samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
        return unit_size_;
}

void Segment::free_unused_memory()
{
        lock_guard<recursive_mutex> lock(mutex_);

        // Do not mess with the data chunks if we have iterators pointing at them
        if (iterator_count_ > 0) {
                mem_optimization_requested_ = true;
                return;
        }

        // No more data will come in, so re-create the last chunk accordingly
        uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
        memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

        delete[] current_chunk_;
        current_chunk_ = resized_chunk;

        data_chunks_.pop_back();
        data_chunks_.push_back(resized_chunk);
}

void Segment::append_single_sample(void* data)
{
        lock_guard<recursive_mutex> lock(mutex_);

        // There will always be space for at least one sample in
        // the current chunk, so we do not need to test for space

        memcpy(current_chunk_ + (used_samples_ * unit_size_), data, unit_size_);
        used_samples_++;
        unused_samples_--;

        if (unused_samples_ == 0) {
                current_chunk_ = new uint8_t[chunk_size_];
                data_chunks_.push_back(current_chunk_);
                used_samples_ = 0;
                unused_samples_ = chunk_size_ / unit_size_;
        }

        sample_count_++;
}

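// Appends 'samples' samples, splitting them across chunk boundaries as
// needed. Example (a sketch): with 1000-sample chunks and 700 samples
// already in the current chunk, appending 500 samples copies 300 into
// the current chunk and the remaining 200 into a freshly allocated one.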
void Segment::append_samples(void* data, uint64_t samples)
{
        lock_guard<recursive_mutex> lock(mutex_);

        const uint8_t* data_byte_ptr = (uint8_t*)data;
        uint64_t remaining_samples = samples;
        uint64_t data_offset = 0;

        // Loop until all samples are stored; the incoming block may be
        // larger than the free space of a single chunk, so a one-time
        // split is not enough in the general case
        do {
                // Copy as many samples as still fit into the current chunk
                const uint64_t copy_count = min(remaining_samples, unused_samples_);

                memcpy(current_chunk_ + (used_samples_ * unit_size_),
                        data_byte_ptr + data_offset, (copy_count * unit_size_));

                used_samples_ += copy_count;
                unused_samples_ -= copy_count;
                remaining_samples -= copy_count;
                data_offset += (copy_count * unit_size_);

                if (unused_samples_ == 0) {
                        // If we're out of memory, this will throw std::bad_alloc
                        current_chunk_ = new uint8_t[chunk_size_];
                        data_chunks_.push_back(current_chunk_);
                        used_samples_ = 0;
                        unused_samples_ = chunk_size_ / unit_size_;
                }
        } while (remaining_samples > 0);

        sample_count_ += samples;
}

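// Copies 'count' samples starting at sample index 'start' into a newly
// allocated buffer. Note: the buffer is created with new[]; the caller
// takes ownership and must release it with delete[].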
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
        lock_guard<recursive_mutex> lock(mutex_);

        assert(start < sample_count_);
        assert(start + count <= sample_count_);
        assert(count > 0);

        uint8_t* dest = new uint8_t[count * unit_size_];
        uint8_t* dest_ptr = dest;

        uint64_t chunk_num = (start * unit_size_) / chunk_size_;
        uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

        while (count > 0) {
                const uint8_t* chunk = data_chunks_[chunk_num];

                // Copy as much as the current chunk provides, then move on
                uint64_t copy_size = min(count * unit_size_,
                        chunk_size_ - chunk_offs);

                memcpy(dest_ptr, chunk + chunk_offs, copy_size);

                dest_ptr += copy_size;
                count -= (copy_size / unit_size_);

                chunk_num++;
                chunk_offs = 0;
        }

        return dest;
}

SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start)
{
        lock_guard<recursive_mutex> lock(mutex_);

        assert(start < sample_count_);

        SegmentRawDataIterator* it = new SegmentRawDataIterator;

        iterator_count_++;

        it->sample_index = start;
        it->chunk_num = (start * unit_size_) / chunk_size_;
        it->chunk_offs = (start * unit_size_) % chunk_size_;
        it->chunk = data_chunks_[it->chunk_num];
        it->value = it->chunk + it->chunk_offs;

        return it;
}

void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase)
{
        lock_guard<recursive_mutex> lock(mutex_);

        // Fail gracefully if we are asked to deliver data we don't have
        if (it->sample_index >= sample_count_)
                return;

        it->sample_index += increase;
        it->chunk_offs += (increase * unit_size_);

        // The increase may carry the offset across more than one chunk boundary
        while (it->chunk_offs >= chunk_size_) {
                it->chunk_num++;
                it->chunk_offs -= chunk_size_;
                it->chunk = data_chunks_[it->chunk_num];
        }

        it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it)
{
        lock_guard<recursive_mutex> lock(mutex_);

        delete it;

        iterator_count_--;

        if ((iterator_count_ == 0) && mem_optimization_requested_) {
                mem_optimization_requested_ = false;
                // mutex_ is recursive, so re-locking it here is safe
                free_unused_memory();
        }
}
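
/*
 * Typical iteration pattern (a sketch only, assuming the segment is
 * non-empty; real callers elsewhere in PulseView may differ, and
 * 'process' is a hypothetical consumer):
 *
 *   SegmentRawDataIterator* it = segment->begin_raw_sample_iteration(0);
 *   for (uint64_t i = 0; i < segment->get_sample_count(); i++) {
 *       process(it->value);
 *       segment->continue_raw_sample_iteration(it, 1);
 *   }
 *   segment->end_raw_sample_iteration(it);
 */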

} // namespace data
} // namespace pv