[pulseview.git] / pv / data / segment.cpp
Commit: Free unused segment memory after acquisition

/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <cassert>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <vector>

using std::lock_guard;
using std::recursive_mutex;
using std::vector;

namespace pv {
namespace data {

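// Sample data is stored in a list of equally sized chunks (data_chunks_).
// chunk_size_ is the largest multiple of unit_size_ that does not exceed
// MaxChunkSize, so a sample never straddles a chunk boundary. used_samples_
// and unused_samples_ track the fill state of the current (last) chunk,
// and a fresh chunk is allocated whenever the current one runs full.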
Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
	sample_count_(0),
	start_time_(0),
	samplerate_(samplerate),
	unit_size_(unit_size)
{
	lock_guard<recursive_mutex> lock(mutex_);
	assert(unit_size_ > 0);

	// Determine the number of samples we can fit in one chunk
	// without exceeding MaxChunkSize
	chunk_size_ = std::min(MaxChunkSize,
		(MaxChunkSize / unit_size_) * unit_size_);

	// Create the initial chunk
	current_chunk_ = new uint8_t[chunk_size_];
	data_chunks_.push_back(current_chunk_);
	used_samples_ = 0;
	unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
	lock_guard<recursive_mutex> lock(mutex_);

	for (uint8_t* chunk : data_chunks_)
		delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
	lock_guard<recursive_mutex> lock(mutex_);
	return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
	return start_time_;
}

double Segment::samplerate() const
{
	return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
	samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
	return unit_size_;
}

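// Shrinks the last chunk down to the bytes actually in use. Intended to be
// called once acquisition has finished: after shrinking, the chunk has no
// spare room, so further appends would write past its end.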
void Segment::free_unused_memory()
{
	lock_guard<recursive_mutex> lock(mutex_);

	// No more data will come in, so re-create the last chunk accordingly
	uint8_t* resized_chunk = new uint8_t[used_samples_ * unit_size_];
	memcpy(resized_chunk, current_chunk_, used_samples_ * unit_size_);

	delete[] current_chunk_;
	current_chunk_ = resized_chunk;

	data_chunks_.pop_back();
	data_chunks_.push_back(resized_chunk);
}

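// Appends a single sample of unit_size_ bytes. Relies on the invariant that
// the current chunk always has room for at least one more sample; a new
// chunk is allocated as soon as the current one becomes full.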
void Segment::append_single_sample(void* data)
{
	lock_guard<recursive_mutex> lock(mutex_);

	// There will always be space for at least one sample in
	// the current chunk, so we do not need to test for space

	memcpy(current_chunk_ + (used_samples_ * unit_size_),
		data, unit_size_);
	used_samples_++;
	unused_samples_--;

	if (unused_samples_ == 0) {
		current_chunk_ = new uint8_t[chunk_size_];
		data_chunks_.push_back(current_chunk_);
		used_samples_ = 0;
		unused_samples_ = chunk_size_ / unit_size_;
	}

	sample_count_++;
}

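// Appends 'samples' samples of unit_size_ bytes each, spilling over into as
// many new chunks as needed when they do not all fit into the current chunk.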
void Segment::append_samples(void* data, uint64_t samples)
{
	lock_guard<recursive_mutex> lock(mutex_);

	const uint8_t* data_byte_ptr = (uint8_t*)data;
	uint64_t remaining_samples = samples;

	do {
		// Copy as many of the remaining samples as fit into the current chunk
		const uint64_t copy_count = std::min(remaining_samples, unused_samples_);

		memcpy(current_chunk_ + (used_samples_ * unit_size_),
			data_byte_ptr, copy_count * unit_size_);

		used_samples_ += copy_count;
		unused_samples_ -= copy_count;
		data_byte_ptr += (copy_count * unit_size_);
		remaining_samples -= copy_count;

		if (unused_samples_ == 0) {
			// Current chunk is full, create a new one
			// If we're out of memory, this will throw std::bad_alloc
			current_chunk_ = new uint8_t[chunk_size_];
			data_chunks_.push_back(current_chunk_);
			used_samples_ = 0;
			unused_samples_ = chunk_size_ / unit_size_;
		}
	} while (remaining_samples > 0);

	sample_count_ += samples;
}

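// Copies 'count' samples starting at sample index 'start' into a newly
// allocated buffer. Ownership passes to the caller, which must release the
// buffer with delete[].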
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
	lock_guard<recursive_mutex> lock(mutex_);

	assert(start < sample_count_);
	assert(start + count <= sample_count_);
	assert(count > 0);

	uint8_t* dest = new uint8_t[count * unit_size_];
	uint8_t* dest_ptr = dest;

	uint64_t chunk_num = (start * unit_size_) / chunk_size_;
	uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

	while (count > 0) {
		const uint8_t* chunk = data_chunks_[chunk_num];

		uint64_t copy_size = std::min(count * unit_size_,
			chunk_size_ - chunk_offs);

		memcpy(dest_ptr, chunk + chunk_offs, copy_size);

		dest_ptr += copy_size;
		count -= (copy_size / unit_size_);

		chunk_num++;
		chunk_offs = 0;
	}

	return dest;
}

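// Raw sample iteration: begin_raw_sample_iteration() returns a heap-allocated
// SegmentRawDataIterator whose 'value' member points at the current sample
// inside its chunk. Advance it with continue_raw_sample_iteration() and
// release it with end_raw_sample_iteration(). Illustrative use, assuming a
// Segment 's' and a hypothetical per-sample handler process_sample():
//
//   SegmentRawDataIterator* it = s.begin_raw_sample_iteration(0);
//   for (uint64_t i = 0; i < s.get_sample_count(); i++) {
//       process_sample(it->value, s.unit_size());
//       s.continue_raw_sample_iteration(it, 1);
//   }
//   s.end_raw_sample_iteration(it);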
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
{
	lock_guard<recursive_mutex> lock(mutex_);

	assert(start < sample_count_);

	SegmentRawDataIterator* it = new SegmentRawDataIterator;

	it->sample_index = start;
	it->chunk_num = (start * unit_size_) / chunk_size_;
	it->chunk_offs = (start * unit_size_) % chunk_size_;
	it->chunk = data_chunks_[it->chunk_num];
	it->value = it->chunk + it->chunk_offs;

	return it;
}

void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase) const
{
	lock_guard<recursive_mutex> lock(mutex_);

	// Fail gracefully if we are asked to deliver data we don't have
	if (it->sample_index > sample_count_)
		return;

	it->sample_index += increase;
	it->chunk_offs += (increase * unit_size_);

	if (it->chunk_offs > (chunk_size_ - 1)) {
		it->chunk_num++;
		it->chunk_offs -= chunk_size_;
		it->chunk = data_chunks_[it->chunk_num];
	}

	it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it) const
{
	delete it;
}

} // namespace data
} // namespace pv