sparse_matrix.hpp
Implementation of remora's compressed sparse matrix: the storage manager, the compressed-storage implementation class, and the proxy/row views built on top of it.
1/*!
2 * \brief Implementation of the sparse matrix class
3 *
4 * \author O. Krause
5 * \date 2017
6 *
7 *
8 * \par Copyright 1995-2015 Shark Development Team
9 *
10 * <BR><HR>
11 * This file is part of Shark.
12 * <http://image.diku.dk/shark/>
13 *
14 * Shark is free software: you can redistribute it and/or modify
15 * it under the terms of the GNU Lesser General Public License as published
16 * by the Free Software Foundation, either version 3 of the License, or
17 * (at your option) any later version.
18 *
19 * Shark is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU Lesser General Public License for more details.
23 *
24 * You should have received a copy of the GNU Lesser General Public License
25 * along with Shark. If not, see <http://www.gnu.org/licenses/>.
26 *
27 */
28#ifndef REMORA_CPU_SPARSE_MATRIX_HPP
29#define REMORA_CPU_SPARSE_MATRIX_HPP
30
31namespace remora{namespace detail{
32
33template<class T, class I>
34struct MatrixStorage{
35 typedef sparse_matrix_storage<T,I> storage_type;
36 typedef I size_type;
37 typedef T value_type;
38
39 MatrixStorage(size_type major_size, size_type minor_size)
40 : m_major_indices_begin(major_size + 1,0)
41 , m_major_indices_end(major_size,0)
42 , m_minor_size(minor_size)
43 {}
44
45 storage_type reserve(size_type non_zeros){
46 if(non_zeros > m_indices.size()){
47 m_indices.resize(non_zeros);
48 m_values.resize(non_zeros);
49 }
50 return {m_values.data(), m_indices.data(), m_major_indices_begin.data(), m_major_indices_end.data(), m_indices.size()};
51 }
52
53 storage_type resize(size_type major_size){
54 m_major_indices_begin.resize(major_size + 1);
55 m_major_indices_end.resize(major_size);
56 return reserve(m_indices.size());
57 }
58
59 size_type major_size()const{
60 return m_major_indices_end.size();
61 }
62
63 size_type minor_size()const{
64 return m_minor_size;
65 }
66
67 template<class Archive>
68 void serialize(Archive& ar, const unsigned int /* file_version */) {
69 ar & boost::serialization::make_nvp("indices", m_indices);
70 ar & boost::serialization::make_nvp("values", m_values);
71 ar & boost::serialization::make_nvp("major_indices_begin", m_major_indices_begin);
72 ar & boost::serialization::make_nvp("major_indices_end", m_major_indices_end);
73 ar & boost::serialization::make_nvp("minor_size", m_minor_size);
74 }
75private:
76 std::vector<I> m_major_indices_begin;
77 std::vector<I> m_major_indices_end;
78 std::vector<T> m_values;
79 std::vector<I> m_indices;
80 size_type m_minor_size;
81};
82template<class StorageManager>
83class compressed_matrix_impl{
84public:
85 typedef typename StorageManager::storage_type storage_type;
86 typedef typename StorageManager::size_type size_type;
87 typedef typename StorageManager::value_type value_type;
88
89 compressed_matrix_impl(StorageManager const& manager, size_type nnz = 0)
90 : m_manager(manager)
91 , m_storage(m_manager.reserve(nnz)){};
92
93 compressed_matrix_impl(compressed_matrix_impl const& impl)
94 : m_manager(impl.m_manager)
95 , m_storage(m_manager.reserve(impl.nnz_reserved())){};
96
97 compressed_matrix_impl(compressed_matrix_impl&& impl)
98 : m_manager(std::move(impl.m_manager))
99 , m_storage(m_manager.reserve(impl.nnz_reserved())){};
100
101 compressed_matrix_impl& operator=(compressed_matrix_impl const& impl){
102 m_manager = impl.m_manager;
103 m_storage = m_manager.reserve(impl.nnz_reserved());
104 }
105
106 compressed_matrix_impl& operator=(compressed_matrix_impl&& impl){
107 m_manager = std::move(impl.m_manager);
108 m_storage = m_manager.reserve(impl.nnz_reserved());
109 return *this;
110 }
111
112 // Accessors
113 size_type major_size() const {
114 return m_manager.major_size();
115 }
116 size_type minor_size() const {
117 return m_manager.minor_size();
118 }
119
120 storage_type const& raw_storage()const{
121 return m_storage;
122 }
123
124 /// \brief Maximum number of non-zeros that the matrix can store or reserve before memory needs to be reallocated
125 size_type nnz_capacity() const{
126 return m_storage.capacity;
127 }
128 /// \brief Size of reserved storage in the matrix (> number of nonzeros stored)
129 size_type nnz_reserved() const {
130 return m_storage.major_indices_begin[major_size()];
131 }
132 /// \brief Number of nonzeros the major index (a major or column depending on orientation) can maximally store before a resize
133 ///
134 /// It holds major_nnz <= major_capacity. Capacity can be increased via major_reserve. It is also increased automatically
135 /// when a new element is inserted and no more storage is available.
136 size_type major_capacity(size_type i)const{
137 REMORA_RANGE_CHECK(i < major_size());
138 return m_storage.major_indices_begin[i+1] - m_storage.major_indices_begin[i];
139 }
140 /// \brief Number of nonzeros the major index (a major or column depending on orientation) currently stores
141 ///
142 /// This is <= major_capacity.
143 size_type major_nnz(size_type i) const {
144 return m_storage.major_indices_end[i] - m_storage.major_indices_begin[i];
145 }
146
147 /// \brief Set the number of nonzeros stored in the major index (a major or column depending on orientation)
148 ///
149 /// This is a semi-internal function and can be used after a change to the underlying storage occured.
150 void set_major_nnz(size_type i,size_type non_zeros) {
151 REMORA_SIZE_CHECK(i < major_size());
152 REMORA_SIZE_CHECK(non_zeros <= major_capacity(i));
153 m_storage.major_indices_end[i] = m_storage.major_indices_begin[i]+non_zeros;
154 }
155
156 void reserve(size_type non_zeros) {
157 if (non_zeros < nnz_capacity()) return;
158 m_storage = m_manager.reserve(non_zeros);
159 }
160
161 /// \brief Reserves space for a given row or column.
162 ///
163 /// Note that all rows are stored in the same array, expanding the storage of a row
164 /// leads to a reordering of the whole matrix and all iterators are invaldiated.
165 /// To make frequent reservation unlikely, the optional third argument will add more
166 /// space additionally. e.g. capacity is at least increased by a factor of 2.
167 void major_reserve(size_type i, size_type non_zeros, bool exact_size = false) {
168 REMORA_RANGE_CHECK(i < major_size());
169 non_zeros = std::min(minor_size(),non_zeros);
170 size_type current_capacity = major_capacity(i);
171 if (non_zeros <= current_capacity) return;
172 size_type space_difference = non_zeros - current_capacity;
173
174 //check if there is place in the end of the container to store the elements
175 if (space_difference > nnz_capacity() - nnz_reserved()){
176 size_type exact = nnz_capacity() + space_difference;
177 size_type spaceous = std::max(2*nnz_capacity(),nnz_capacity() + 2*space_difference);
178 reserve(exact_size? exact:spaceous);
179 }
180 //move the elements of the next majors to make room for the reserved space
181 for (size_type k = major_size()-1; k != i; --k) {
182 value_type* values = m_storage.values + m_storage.major_indices_begin[k];
183 value_type* values_end = m_storage.values + m_storage.major_indices_end[k];
184 size_type* indices = m_storage.indices + m_storage.major_indices_begin[k];
185 size_type* indices_end = m_storage.indices + m_storage.major_indices_end[k];
186 std::copy_backward(values, values_end, values_end + space_difference);
187 std::copy_backward(indices, indices_end, indices_end + space_difference);
188 m_storage.major_indices_begin[k] += space_difference;
189 m_storage.major_indices_end[k] += space_difference;
190 }
191 m_storage.major_indices_begin[major_size()] += space_difference;
192 }
193
194 void resize(size_type major, size_type minor){
195 m_storage = m_manager.resize(major);
196 m_minor_size = minor;
197 }
198
199 typedef iterators::compressed_storage_iterator<value_type const, size_type const> const_major_iterator;
200 typedef iterators::compressed_storage_iterator<value_type, size_type> major_iterator;
201
202 const_major_iterator cmajor_begin(size_type i) const {
203 REMORA_SIZE_CHECK(i < major_size());
204 REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);//internal check
205 return const_major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_begin[i],i);
206 }
207
208 const_major_iterator cmajor_end(size_type i) const {
209 REMORA_SIZE_CHECK(i < major_size());
210 REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);//internal check
211 return const_major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_end[i],i);
212 }
213
214 major_iterator major_begin(size_type i) {
215 REMORA_SIZE_CHECK(i < major_size());
216 REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);//internal check
217 return major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_begin[i],i);
218 }
219
220 major_iterator major_end(size_type i) {
221 REMORA_SIZE_CHECK(i < major_size());
222 REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);//internal check
223 return major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_end[i],i);
224 }
225
226 major_iterator set_element(major_iterator pos, size_type index, value_type value) {
227 size_type major_index = pos.major_index();
228 size_type line_pos = pos - major_begin(major_index);
229 REMORA_RANGE_CHECK(major_index < major_size());
230 REMORA_RANGE_CHECK(size_type(pos - major_begin(major_index)) <= major_nnz(major_index));
231 REMORA_RANGE_CHECK(pos == major_end(major_index) || pos.index() >= index);//correct ordering
232
233 //shortcut: element already exists.
234 if (pos != major_end(major_index) && pos.index() == index) {
235 *pos = value;
236 return pos + 1;
237 }
238
239 //get position of the element in the array.
240 std::ptrdiff_t arrayPos = line_pos + m_storage.major_indices_begin[major_index];
241
242
243 //check that there is enough space in the major. this invalidates pos.
244 if (major_capacity(major_index) == major_nnz(major_index))
245 major_reserve(major_index,std::max<size_type>(2*major_capacity(major_index),5));
246
247 //copy the remaining elements further to make room for the new element
248 std::copy_backward(
249 m_storage.values + arrayPos, m_storage.values + m_storage.major_indices_end[major_index],
250 m_storage.values + m_storage.major_indices_end[major_index] + 1
251 );
252 std::copy_backward(
253 m_storage.indices + arrayPos, m_storage.indices + m_storage.major_indices_end[major_index],
254 m_storage.indices + m_storage.major_indices_end[major_index] + 1
255 );
256 //insert new element
257 m_storage.values[arrayPos] = value;
258 m_storage.indices[arrayPos] = index;
259 ++m_storage.major_indices_end[major_index];
260
261 //return new iterator behind the inserted element.
262 return major_begin(major_index) + (line_pos + 1);
263
264 }
265
266 major_iterator clear_range(major_iterator start, major_iterator end) {
267 REMORA_RANGE_CHECK(start.major_index() == end.major_index());
268 size_type major_index = start.major_index();
269 size_type range_size = end - start;
270 size_type range_start = start - major_begin(major_index);
271 size_type range_end = range_start + range_size;
272
273 //get start of the storage of the row/column we are going to change
274 auto values = m_storage.values + m_storage.major_indices_begin[major_index];
275 auto indices = m_storage.indices + m_storage.major_indices_begin[major_index];
276
277 //remove the elements in the range by copying the elements after it to the start of the range
278 std::copy(values + range_end, values + major_nnz(major_index), values + range_start);
279 std::copy(indices + range_end, indices + major_nnz(major_index), indices + range_start);
280 //subtract number of removed elements
281 m_storage.major_indices_end[major_index] -= range_size;
282 //return new iterator to the first element after the end of the range
283 return major_begin(major_index) + range_start;
284 }
285
286 major_iterator clear_element(major_iterator elem) {
287 REMORA_RANGE_CHECK(elem != major_end());
288 return clear_range(elem,elem + 1);
289 }
290
291 template<class Archive>
292 void serialize(Archive& ar, const unsigned int){
293 ar & m_manager;
294 ar & m_minor_size;
295 if(Archive::is_loading::value)
296 m_storage = m_manager.reserve(0);
297 }
298
299private:
300 StorageManager m_manager;
301 storage_type m_storage;
302 size_type m_minor_size;
303};
304
///\brief proxy handling: closure, transpose and rows of a given matrix
///
/// Holds a pointer to the wrapped matrix plus a [m_major_start, m_major_end)
/// range of major indices; all operations forward to the matrix with the
/// start offset applied. Copying the proxy never copies matrix data.
template<class M, class Orientation>
class compressed_matrix_proxy: public matrix_expression<compressed_matrix_proxy<M, Orientation>, cpu_tag>{
private:
	// needed so the const-conversion constructor can read another proxy's privates
	template<class,class> friend class compressed_matrix_proxy;
public:
	typedef typename M::size_type size_type;
	typedef typename M::value_type value_type;
	typedef typename M::const_reference const_reference;
	typedef typename remora::reference<M>::type reference;

	typedef compressed_matrix_proxy<M const, Orientation> const_closure_type;
	typedef compressed_matrix_proxy<M, Orientation> closure_type;
	typedef typename remora::storage<M>::type storage_type;
	typedef typename M::const_storage_type const_storage_type;
	typedef elementwise<sparse_tag> evaluation_category;
	typedef Orientation orientation;


	//conversion matrix->proxy: covers the full major range of m
	compressed_matrix_proxy(M& m): m_matrix(&m), m_major_start(0){
		m_major_end = M::orientation::index_M(m.size1(),m.size2());
	}

	//rows/columns proxy: covers major indices [start, end) of m
	compressed_matrix_proxy(M& m, size_type start, size_type end): m_matrix(&m), m_major_start(start), m_major_end(end){}


	//copy-ctor/const-conversion (non-const proxy -> possibly const proxy)
	compressed_matrix_proxy( compressed_matrix_proxy<typename std::remove_const<M>::type, Orientation> const& proxy)
	:m_matrix(proxy.m_matrix), m_major_start(proxy.m_major_start), m_major_end(proxy.m_major_end){}

	/// \brief The wrapped matrix.
	M& matrix() const{
		return *m_matrix;
	}

	/// \brief First major index covered by this proxy.
	size_type start() const{
		return m_major_start;
	}
	/// \brief One past the last major index covered by this proxy.
	size_type end() const{
		return m_major_end;
	}

	//assignment from different expression: evaluate into a temporary first,
	//so that e may alias this matrix
	template<class E>
	compressed_matrix_proxy& operator=(matrix_expression<E, cpu_tag> const& e){
		return assign(*this, typename matrix_temporary<E>::type(e));
	}
	compressed_matrix_proxy& operator=(compressed_matrix_proxy const& e){
		return assign(*this, typename matrix_temporary<M>::type(e));
	}

	///\brief Number of rows of the matrix
	size_type size1() const {
		return orientation::index_M( m_major_end - m_major_start, minor_size(*m_matrix));
	}

	///\brief Number of columns of the matrix
	size_type size2() const {
		return orientation::index_m(m_major_end - m_major_start, minor_size(*m_matrix));
	}

	/// \brief Number of nonzeros the major index (a row or column depending on orientation) can maximally store before a resize
	size_type major_capacity(size_type i)const{
		return m_matrix->major_capacity(m_major_start + i);
	}
	/// \brief Number of nonzeros the major index (a row or column depending on orientation) currently stores
	size_type major_nnz(size_type i) const {
		return m_matrix->major_nnz(m_major_start + i);
	}

	/// \brief Returns the underlying storage of the whole wrapped matrix (not restricted to the proxy's range).
	storage_type raw_storage()const{
		return m_matrix->raw_storage();
	}

	typename device_traits<cpu_tag>::queue_type& queue() const{
		return m_matrix->queue();
	}

	/// \brief Reserve storage for a given major; forwards to the matrix, so all iterators may be invalidated.
	void major_reserve(size_type i, size_type non_zeros, bool exact_size = false) {
		m_matrix->major_reserve(m_major_start + i, non_zeros, exact_size);
	}

	//note: the typedef deliberately shadows the major_iterator trait template here
	typedef typename major_iterator<M>::type major_iterator;
	typedef major_iterator const_major_iterator;

	major_iterator major_begin(size_type i) const {
		return m_matrix->major_begin(m_major_start + i);
	}

	major_iterator major_end(size_type i) const{
		return m_matrix->major_end(m_major_start + i);
	}

	major_iterator set_element(major_iterator pos, size_type index, value_type value){
		return m_matrix->set_element(pos, index, value);
	}

	major_iterator clear_range(major_iterator start, major_iterator end) {
		return m_matrix->clear_range(start,end);
	}

	/// \brief Remove all nonzeros in the proxy's major range.
	void clear() {
		//whole-matrix proxy: delegate to the (presumably cheaper) matrix clear
		if(m_major_start == 0 && m_major_end == major_size(*m_matrix))
			m_matrix->clear();
		else for(size_type i = m_major_start; i != m_major_end; ++i)
			m_matrix->clear_range(m_matrix -> major_begin(i), m_matrix -> major_end(i));
	}
private:
	M* m_matrix;
	size_type m_major_start;
	size_type m_major_end;
};
418
419
420
421template<class M>
422class compressed_matrix_row: public vector_expression<compressed_matrix_row<M>, cpu_tag>{
423private:
424 template<class> friend class compressed_matrix_row;
425public:
426 typedef typename closure<M>::type matrix_type;
427 typedef typename M::size_type size_type;
428 typedef typename M::value_type value_type;
429 typedef typename M::const_reference const_reference;
430 typedef typename remora::reference<M>::type reference;
431
432 typedef compressed_matrix_row<M const> const_closure_type;
433 typedef compressed_matrix_row closure_type;
434 typedef typename remora::storage<M>::type::template row_storage<row_major>::type storage_type;
435 typedef typename M::const_storage_type::template row_storage<row_major>::type const_storage_type;
436 typedef elementwise<sparse_tag> evaluation_category;
437
438 // Construction
439 compressed_matrix_row(matrix_type const& m, size_type i):m_matrix(m), m_row(i){}
440 //copy or conversion ctor non-const ->const proxy
441 template<class M2>
442 compressed_matrix_row(compressed_matrix_row<M2 > const& ref)
443 :m_matrix(ref.m_matrix), m_row(ref.m_row){}
444
445 //assignment from different expression
446 template<class E>
447 compressed_matrix_row& operator=(vector_expression<E, cpu_tag> const& e){
448 return assign(*this, typename vector_temporary<E>::type(e));
449 }
450
451 compressed_matrix_row& operator=(compressed_matrix_row const& e){
452 return assign(*this, typename vector_temporary<M>::type(e));
453 }
454
455 ///\brief Returns the underlying storage structure for low level access
456 storage_type raw_storage() const{
457 return m_matrix.raw_storage().row(m_row, row_major());
458 }
459
460 /// \brief Return the size of the vector
461 size_type size() const {
462 return m_matrix.size2();
463 }
464
465 size_type nnz() const {
466 return m_matrix.major_nnz(m_row);
467 }
468
469 size_type nnz_capacity(){
470 return m_matrix.major_capacity(m_row);
471 }
472
473 void reserve(size_type non_zeros) {
474 m_matrix.major_reserve(m_row, non_zeros);
475 }
476
477 typename device_traits<cpu_tag>::queue_type& queue(){
478 return device_traits<cpu_tag>::default_queue();
479 }
480
481 // --------------
482 // ITERATORS
483 // --------------
484
485 typedef typename major_iterator<matrix_type>::type iterator;
486 typedef iterator const_iterator;
487
488 /// \brief return an iterator tp the first non-zero element of the vector
489 iterator begin() const {
490 return m_matrix.major_begin(m_row);
491 }
492
493 /// \brief return an iterator behind the last non-zero element of the vector
494 iterator end() const {
495 return m_matrix.major_end(m_row);
496 }
497
498 iterator set_element(iterator pos, size_type index, value_type value) {
499 return m_matrix.set_element(pos,index,value);
500 }
501
502 iterator clear_range(iterator start, iterator end) {
503 m_matrix.clear_range(start,end);
504 }
505
506 iterator clear_element(iterator pos){
507 m_matrix.clear_element(pos);
508 }
509
510 void clear(){
511 m_matrix.clear_range(begin(),end());
512 }
513private:
514 matrix_type m_matrix;
515 size_type m_row;
516};
517
518}}
519#endif