/*
 * This file is part of the PulseView project.
 *
 * Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
 * Copyright (C) 2012 Joel Holdsworth <joel@airwebreathe.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "segment.hpp"

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <mutex>
#include <vector>

using std::lock_guard;
using std::recursive_mutex;
using std::vector;

namespace pv {
namespace data {

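// Typical usage (a sketch; writers and readers may live on different
// threads, which is why the accessors take the recursive mutex):
//
//   Segment segment(samplerate, unit_size);
//   segment.append_samples(buf, sample_count);
//   uint8_t* data = segment.get_raw_samples(0, sample_count);
//   ...
//   delete[] data;  // the caller owns the returned buffer
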
Segment::Segment(uint64_t samplerate, unsigned int unit_size) :
        sample_count_(0),
        start_time_(0),
        samplerate_(samplerate),
        unit_size_(unit_size)
{
        lock_guard<recursive_mutex> lock(mutex_);
        assert(unit_size_ > 0);

        // Determine the chunk size in bytes: the largest multiple of
        // unit_size_ that does not exceed MaxChunkSize
        chunk_size_ = std::min(MaxChunkSize,
                (MaxChunkSize / unit_size_) * unit_size_);

        // Create the initial chunk
        current_chunk_ = new uint8_t[chunk_size_];
        data_chunks_.push_back(current_chunk_);
        used_samples_ = 0;
        unused_samples_ = chunk_size_ / unit_size_;
}

Segment::~Segment()
{
        lock_guard<recursive_mutex> lock(mutex_);

        for (uint8_t* chunk : data_chunks_)
                delete[] chunk;
}

uint64_t Segment::get_sample_count() const
{
        lock_guard<recursive_mutex> lock(mutex_);
        return sample_count_;
}

const pv::util::Timestamp& Segment::start_time() const
{
        return start_time_;
}

double Segment::samplerate() const
{
        return samplerate_;
}

void Segment::set_samplerate(double samplerate)
{
        samplerate_ = samplerate;
}

unsigned int Segment::unit_size() const
{
        return unit_size_;
}

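// Note: Both append functions maintain the invariant that
// current_chunk_ always has room for at least one more sample,
// allocating a fresh chunk as soon as the current one fills up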
void Segment::append_single_sample(void* data)
{
        lock_guard<recursive_mutex> lock(mutex_);

        // There will always be space for at least one sample in
        // the current chunk, so we do not need to test for space

        memcpy(current_chunk_ + (used_samples_ * unit_size_),
                data, unit_size_);
        used_samples_++;
        unused_samples_--;

        if (unused_samples_ == 0) {
                // If we're out of memory, this will throw std::bad_alloc
                current_chunk_ = new uint8_t[chunk_size_];
                data_chunks_.push_back(current_chunk_);
                used_samples_ = 0;
                unused_samples_ = chunk_size_ / unit_size_;
        }

        sample_count_++;
}

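// Appends a run of 'samples' samples (a sample count, not a byte
// count), splitting it across chunk boundaries as needed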
void Segment::append_samples(void* data, uint64_t samples)
{
        lock_guard<recursive_mutex> lock(mutex_);

        const uint8_t* data_byte_ptr = (uint8_t*)data;
        uint64_t remaining_samples = samples;
        uint64_t data_offset = 0;

        do {
                // Copy as many samples as the current chunk can take;
                // looping handles runs that span more than one chunk
                const uint64_t copy_count =
                        std::min(remaining_samples, unused_samples_);

                memcpy(current_chunk_ + (used_samples_ * unit_size_),
                        data_byte_ptr + data_offset, (copy_count * unit_size_));

                used_samples_ += copy_count;
                unused_samples_ -= copy_count;
                remaining_samples -= copy_count;
                data_offset += (copy_count * unit_size_);

                if (unused_samples_ == 0) {
                        // If we're out of memory, this will throw std::bad_alloc
                        current_chunk_ = new uint8_t[chunk_size_];
                        data_chunks_.push_back(current_chunk_);
                        used_samples_ = 0;
                        unused_samples_ = chunk_size_ / unit_size_;
                }
        } while (remaining_samples > 0);

        sample_count_ += samples;
}

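// Returns a copy of the requested sample range. The caller takes
// ownership of the buffer and must release it with delete[]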
uint8_t* Segment::get_raw_samples(uint64_t start, uint64_t count) const
{
        lock_guard<recursive_mutex> lock(mutex_);

        assert(start < sample_count_);
        assert(start + count <= sample_count_);
        assert(count > 0);

        uint8_t* dest = new uint8_t[count * unit_size_];
        uint8_t* dest_ptr = dest;

        uint64_t chunk_num = (start * unit_size_) / chunk_size_;
        uint64_t chunk_offs = (start * unit_size_) % chunk_size_;

        while (count > 0) {
                const uint8_t* chunk = data_chunks_[chunk_num];

                // Copy at most up to the end of the current chunk
                uint64_t copy_size = std::min(count * unit_size_,
                        chunk_size_ - chunk_offs);

                memcpy(dest_ptr, chunk + chunk_offs, copy_size);

                dest_ptr += copy_size;
                count -= (copy_size / unit_size_);

                // Subsequent chunks are read from their beginning
                chunk_num++;
                chunk_offs = 0;
        }

        return dest;
}

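// Sets up chunk-aware iteration over the raw sample data, starting
// at sample index 'start'. Release the iterator with
// end_raw_sample_iteration() when done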
SegmentRawDataIterator* Segment::begin_raw_sample_iteration(uint64_t start) const
{
        lock_guard<recursive_mutex> lock(mutex_);

        assert(start < sample_count_);

        SegmentRawDataIterator* it = new SegmentRawDataIterator;

        it->sample_index = start;
        it->chunk_num = (start * unit_size_) / chunk_size_;
        it->chunk_offs = (start * unit_size_) % chunk_size_;
        it->chunk = data_chunks_[it->chunk_num];
        it->value = it->chunk + it->chunk_offs;

        return it;
}

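// Advances the iterator by 'increase' samples, crossing chunk
// boundaries as necessary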
void Segment::continue_raw_sample_iteration(SegmentRawDataIterator* it, uint64_t increase) const
{
        lock_guard<recursive_mutex> lock(mutex_);

        // Fail gracefully if we are asked to deliver data we don't have
        if (it->sample_index > sample_count_)
                return;

        it->sample_index += increase;
        it->chunk_offs += (increase * unit_size_);

        // A while loop is used so that increases spanning more than
        // one chunk advance the iterator correctly
        while (it->chunk_offs > (chunk_size_ - 1)) {
                it->chunk_num++;
                it->chunk_offs -= chunk_size_;
                it->chunk = data_chunks_[it->chunk_num];
        }

        it->value = it->chunk + it->chunk_offs;
}

void Segment::end_raw_sample_iteration(SegmentRawDataIterator* it) const
{
        delete it;
}

} // namespace data
} // namespace pv