/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <mutex>
#include <vector>

using std::lock_guard;
using std::recursive_mutex;
using std::vector;

namespace pv {
namespace data {

const uint64_t Segment::MaxChunkSize = 10 * 1024 * 1024;  /* 10 MiB */

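// Sample data is stored in a list of chunks allocated on the heap. Each
// chunk holds a whole number of samples of unit_size_ bytes, so chunk_size_
// is MaxChunkSize rounded down to a multiple of the unit size.
// current_chunk_ always points to the chunk currently being filled;
// used_samples_ and unused_samples_ track how full that chunk is.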
Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
        sample_count_(0),
        start_time_(0),
        samplerate_(samplerate),
        unit_size_(unit_size)
{
        lock_guard<recursive_mutex> lock(mutex_);
        assert(unit_size_ > 0);

        // Determine the number of samples we can fit in one chunk
        // without exceeding MaxChunkSize
        chunk_size_ = std::min(MaxChunkSize,
                (MaxChunkSize / unit_size_) * unit_size_);

        // Create the initial chunk
        current_chunk_ = new uint8_t[chunk_size_];
        data_chunks_.push_back(current_chunk_);
        used_samples_ = 0;
        unused_samples_ = chunk_size_ / unit_size_;
}

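// A minimal usage sketch (hypothetical caller, not part of this file):
//
//     pv::data::Segment segment(44100, sizeof(uint16_t));
//     segment.append_samples(buffer, buffer_sample_count);
//     uint8_t* data = segment.get_raw_samples(0, 10);
//     // ... use the first 10 samples ...
//     delete[] data;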
Segment::~Segment()
{
        lock_guard<recursive_mutex> lock(mutex_);

        for (uint8_t* chunk : data_chunks_)
                delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
        lock_guard<recursive_mutex> lock(mutex_);
        return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
        return start_time_;
}

double Segment::samplerate() const
{
        return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
        samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
        return unit_size_;
}

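// Shrinks the last chunk to the size actually used. Intended to be called
// once acquisition has finished and no further samples will be appended.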
void Segment::free_unused_memory()
{
        lock_guard<recursive_mutex> lock(mutex_);

        // No more data will come in, so re-create the last chunk accordingly
        uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
        memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

        delete[] current_chunk_;
        current_chunk_ = resized_chunk;

        data_chunks_.pop_back();
        data_chunks_.push_back(resized_chunk);
}

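// Appends one sample of unit_size_ bytes to the current chunk and allocates
// a fresh chunk as soon as the current one is full, so that there is always
// room for the next sample.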
void Segment::append_single_sample(void *data)
{
        lock_guard<recursive_mutex> lock(mutex_);

        // There will always be space for at least one sample in
        // the current chunk, so we do not need to test for space

        memcpy(current_chunk_ + (used_samples_ * unit_size_),
                data, unit_size_);
        used_samples_++;
        unused_samples_--;

        if (unused_samples_ == 0) {
                current_chunk_ = new uint8_t[chunk_size_];
                data_chunks_.push_back(current_chunk_);
                used_samples_ = 0;
                unused_samples_ = chunk_size_ / unit_size_;
        }

        sample_count_++;
}

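// Appends 'samples' samples of unit_size_ bytes from 'data', filling up the
// current chunk first and spilling the remainder into newly allocated chunks.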
void Segment::append_samples(void* data, uint64_t samples)
{
        lock_guard<recursive_mutex> lock(mutex_);

        if (unused_samples_ >= samples) {
                // All samples fit into the current chunk
                memcpy(current_chunk_ + (used_samples_ * unit_size_),
                        data, (samples * unit_size_));
                used_samples_ += samples;
                unused_samples_ -= samples;
        } else {
                // Only a part of the samples fit, split data up between chunks
                memcpy(current_chunk_ + (used_samples_ * unit_size_),
                        data, (unused_samples_ * unit_size_));

                const uint64_t chunk_samples = chunk_size_ / unit_size_;
                uint64_t remaining_samples = samples - unused_samples_;
                uint64_t data_offset = unused_samples_ * unit_size_;

                do {
                        // If we're out of memory, this will throw std::bad_alloc
                        current_chunk_ = new uint8_t[chunk_size_];
                        data_chunks_.push_back(current_chunk_);

                        // Copy at most one chunk's worth of the remaining data
                        const uint64_t copy_samples =
                                std::min(remaining_samples, chunk_samples);
                        memcpy(current_chunk_, (uint8_t*)data + data_offset,
                                (copy_samples * unit_size_));

                        remaining_samples -= copy_samples;
                        data_offset += (copy_samples * unit_size_);

                        used_samples_ = copy_samples;
                        unused_samples_ = chunk_samples - copy_samples;
                } while (remaining_samples > 0);
        }

        if (unused_samples_ == 0) {
                // If we're out of memory, this will throw std::bad_alloc
                current_chunk_ = new uint8_t[chunk_size_];
                data_chunks_.push_back(current_chunk_);
                used_samples_ = 0;
                unused_samples_ = chunk_size_ / unit_size_;
        }

        sample_count_ += samples;
}

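// Returns a copy of 'count' samples starting at sample index 'start' as a
// newly allocated buffer of count * unit_size_ bytes. The caller takes
// ownership and must release the buffer with delete[].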
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
        assert(start < sample_count_);
        assert(start + count <= sample_count_);
        assert(count > 0);

        lock_guard<recursive_mutex> lock(mutex_);

        uint8_t* dest = new uint8_t[count * unit_size_];
        uint8_t* dest_ptr = dest;

        uint64_t chunk_num = (start * unit_size_) / chunk_size_;
        uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

        while (count > 0) {
                const uint8_t* chunk = data_chunks_[chunk_num];

                uint64_t copy_size = std::min(count * unit_size_,
                        chunk_size_ - chunk_offs);

                memcpy(dest_ptr, chunk + chunk_offs, copy_size);

                dest_ptr += copy_size;
                count -= (copy_size / unit_size_);

                chunk_num++;
                chunk_offs = 0;
        }

        return dest;
}

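// Raw sample iteration: begin_raw_sample_iteration() returns an iterator
// positioned at 'start', continue_raw_sample_iteration() advances it by a
// given number of samples, and end_raw_sample_iteration() releases it.
// A hypothetical caller (not part of this file) might read back all samples
// one by one like this:
//
//     SegmentRawDataIterator* it = segment.begin_raw_sample_iteration(0);
//     for (uint64_t i = 0; i < segment.get_sample_count(); i++) {
//             process_sample(it->value);  // process_sample() is assumed
//             segment.continue_raw_sample_iteration(it, 1);
//     }
//     segment.end_raw_sample_iteration(it);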
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
{
        SegmentRawDataIterator* it = new SegmentRawDataIterator;

        assert(start < sample_count_);

        it->sample_index = start;
        it->chunk_num = (start * unit_size_) / chunk_size_;
        it->chunk_offs = (start * unit_size_) % chunk_size_;
        it->chunk = data_chunks_[it->chunk_num];
        it->value = it->chunk + it->chunk_offs;

        return it;
}

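// Advances the iterator by 'increase' samples. Note that the chunk index is
// stepped forward at most once per call, so a single call must not advance
// the iterator past more than one chunk boundary.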
void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase) const
{
        lock_guard<recursive_mutex> lock(mutex_);

        // Fail gracefully if we are asked to deliver data we don't have
        if (it->sample_index > sample_count_)
                return;

        it->sample_index += increase;
        it->chunk_offs += (increase * unit_size_);

        if (it->chunk_offs >= chunk_size_) {
                it->chunk_num++;
                it->chunk_offs -= chunk_size_;
                it->chunk = data_chunks_[it->chunk_num];
        }

        it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it) const
{
        delete it;
}

} // namespace data
} // namespace pv