#ifndef REMORA_CPU_SPARSE_MATRIX_HPP
#define REMORA_CPU_SPARSE_MATRIX_HPP

namespace remora{ namespace detail{
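
///\brief Storage manager owning the arrays of a compressed (CSR-like) matrix.
///
/// Values and their minor indices live in two flat arrays. For every major line i (a row in
/// row-major storage, a column in column-major storage) the slots
/// [major_indices_begin[i], major_indices_end[i]) hold the stored nonzeros, and
/// major_indices_begin[i+1] marks the end of the space reserved for that line.
/// reserve() and resize() hand out a non-owning sparse_matrix_storage view of these arrays;
/// the view is invalidated whenever the vectors reallocate, so callers must re-request it
/// after every call (as compressed_matrix_impl below does).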
template<class T, class I>
class MatrixStorage{
public:
    typedef sparse_matrix_storage<T,I> storage_type;
    //size and value typedefs; compressed_matrix_impl below expects both to be present
    typedef I size_type;
    typedef T value_type;
    MatrixStorage(size_type major_size, size_type minor_size)
    : m_major_indices_begin(major_size + 1,0)
    , m_major_indices_end(major_size,0)
    , m_minor_size(minor_size){}
    //Ensure space for at least non_zeros elements and return a view of the arrays
    storage_type reserve(size_type non_zeros){
        if(non_zeros > m_indices.size()){
            m_indices.resize(non_zeros);
            m_values.resize(non_zeros);
        }
        return {m_values.data(), m_indices.data(), m_major_indices_begin.data(), m_major_indices_end.data(), m_indices.size()};
    }
    storage_type resize(size_type major_size){
        m_major_indices_begin.resize(major_size + 1);
        m_major_indices_end.resize(major_size);
        return reserve(m_indices.size());
    }
    size_type major_size()const{
        return m_major_indices_end.size();
    }
    size_type minor_size()const{
        return m_minor_size;
    }
    template<class Archive>
    void serialize(Archive& ar, const unsigned int /*version*/){
        ar & boost::serialization::make_nvp("indices", m_indices);
        ar & boost::serialization::make_nvp("values", m_values);
        ar & boost::serialization::make_nvp("major_indices_begin", m_major_indices_begin);
        ar & boost::serialization::make_nvp("major_indices_end", m_major_indices_end);
        ar & boost::serialization::make_nvp("minor_size", m_minor_size);
    }
private:
    std::vector<I> m_major_indices_begin;
    std::vector<I> m_major_indices_end;
    std::vector<T> m_values;
    std::vector<I> m_indices;
    size_type m_minor_size;
};
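
///\brief Implements the algorithms of a compressed matrix on top of a storage manager.
///
/// For every major line i the invariant
///     major_indices_begin[i] <= major_indices_end[i] <= major_indices_begin[i+1]
/// holds: the stored nonzeros of line i occupy [major_indices_begin[i], major_indices_end[i])
/// in ascending order of minor index, and the gap up to major_indices_begin[i+1] is reserved
/// but unused capacity of that line.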
template<class StorageManager>
class compressed_matrix_impl{
public:
    typedef typename StorageManager::storage_type storage_type;
    typedef typename StorageManager::size_type size_type;
    typedef typename StorageManager::value_type value_type;
    compressed_matrix_impl(StorageManager const& manager, size_type nnz = 0)
    : m_manager(manager)
    , m_storage(m_manager.reserve(nnz)){}

    //the copied manager owns fresh arrays, so a new storage view has to be requested
    compressed_matrix_impl(compressed_matrix_impl const& impl)
    : m_manager(impl.m_manager)
    , m_storage(m_manager.reserve(impl.nnz_reserved())){}

    compressed_matrix_impl(compressed_matrix_impl&& impl)
    : m_manager(std::move(impl.m_manager))
    , m_storage(m_manager.reserve(impl.nnz_reserved())){}

    compressed_matrix_impl& operator=(compressed_matrix_impl const& impl){
        m_manager = impl.m_manager;
        m_storage = m_manager.reserve(impl.nnz_reserved());
        return *this;
    }

    compressed_matrix_impl& operator=(compressed_matrix_impl&& impl){
        m_manager = std::move(impl.m_manager);
        m_storage = m_manager.reserve(impl.nnz_reserved());
        return *this;
    }
    size_type major_size()const{
        return m_manager.major_size();
    }
    size_type minor_size()const{
        return m_manager.minor_size();
    }
    storage_type const& raw_storage()const{
        return m_storage;
    }
    ///\brief Number of nonzero elements the shared array can hold without reallocation
    size_type nnz_capacity()const{
        return m_storage.capacity;
    }
    ///\brief Total number of element slots currently reserved over all major lines
    size_type nnz_reserved()const{
        return m_storage.major_indices_begin[major_size()];
    }
    ///\brief Number of element slots reserved for major line i
    size_type major_capacity(size_type i)const{
        REMORA_RANGE_CHECK(i < major_size());
        return m_storage.major_indices_begin[i+1] - m_storage.major_indices_begin[i];
    }
    ///\brief Number of nonzeros currently stored in major line i
    size_type major_nnz(size_type i)const{
        return m_storage.major_indices_end[i] - m_storage.major_indices_begin[i];
    }
    void set_major_nnz(size_type i, size_type non_zeros){
        REMORA_SIZE_CHECK(i < major_size());
        REMORA_SIZE_CHECK(non_zeros <= major_capacity(i));
        m_storage.major_indices_end[i] = m_storage.major_indices_begin[i] + non_zeros;
    }
    ///\brief Grow the shared array so that it can hold at least non_zeros elements in total
    void reserve(size_type non_zeros){
        if (non_zeros < nnz_capacity()) return;
        m_storage = m_manager.reserve(non_zeros);
    }
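
    //major_reserve enlarges the capacity of a single major line in place. If the shared array
    //has too few free slots at its end, it is grown first; afterwards every line k > i is
    //shifted right by the missing amount (back to front, via std::copy_backward) so that the
    //freed slots end up directly behind line i. Example: with per-line capacities |2|2|2| and
    //a request for 4 slots in line 0, lines 1 and 2 move right by 2 and the layout becomes
    //|4|2|2|. The shift is linear in the number of stored elements, so reserving the right
    //capacities before inserting avoids repeated moves.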
    void major_reserve(size_type i, size_type non_zeros, bool exact_size = false){
        REMORA_RANGE_CHECK(i < major_size());
        non_zeros = std::min(minor_size(),non_zeros);
        size_type current_capacity = major_capacity(i);
        if (non_zeros <= current_capacity) return;
        size_type space_difference = non_zeros - current_capacity;

        //grow the shared array if it cannot absorb the additional slots
        if (space_difference > nnz_capacity() - nnz_reserved()){
            size_type exact = nnz_capacity() + space_difference;
            size_type spaceous = std::max(2*nnz_capacity(),nnz_capacity() + 2*space_difference);
            reserve(exact_size? exact : spaceous);
        }
        //shift all lines behind i to the right to make room
        for (size_type k = major_size()-1; k != i; --k){
            value_type* values = m_storage.values + m_storage.major_indices_begin[k];
            value_type* values_end = m_storage.values + m_storage.major_indices_end[k];
            size_type* indices = m_storage.indices + m_storage.major_indices_begin[k];
            size_type* indices_end = m_storage.indices + m_storage.major_indices_end[k];
            std::copy_backward(values, values_end, values_end + space_difference);
            std::copy_backward(indices, indices_end, indices_end + space_difference);
            m_storage.major_indices_begin[k] += space_difference;
            m_storage.major_indices_end[k] += space_difference;
        }
        m_storage.major_indices_begin[major_size()] += space_difference;
    }
    void resize(size_type major, size_type minor){
        m_storage = m_manager.resize(major);
        m_minor_size = minor;
    }
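
    //Iterators over a single major line. compressed_storage_iterator walks the stored
    //(minor index, value) pairs of one line; major_begin(i)/major_end(i) bound the nonzeros
    //of line i, pos.index() is the minor index and pos.major_index() the line of the
    //current position.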
    typedef iterators::compressed_storage_iterator<value_type const, size_type const> const_major_iterator;
    typedef iterators::compressed_storage_iterator<value_type, size_type> major_iterator;
    const_major_iterator cmajor_begin(size_type i)const{
        REMORA_SIZE_CHECK(i < major_size());
        REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);
        return const_major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_begin[i],i);
    }
    const_major_iterator cmajor_end(size_type i)const{
        REMORA_SIZE_CHECK(i < major_size());
        REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);
        return const_major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_end[i],i);
    }
    major_iterator major_begin(size_type i){
        REMORA_SIZE_CHECK(i < major_size());
        REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);
        return major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_begin[i],i);
    }
    major_iterator major_end(size_type i){
        REMORA_SIZE_CHECK(i < major_size());
        REMORA_RANGE_CHECK(m_storage.major_indices_begin[i] <= m_storage.major_indices_end[i]);
        return major_iterator(m_storage.values, m_storage.indices, m_storage.major_indices_end[i],i);
    }
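
    //set_element writes value at minor position index into the line of pos, overwriting an
    //existing element with that index or inserting a new one. pos must be the first stored
    //element of the line whose minor index is >= index (elements are written in increasing
    //index order); the returned iterator points directly behind the written element, so a
    //line can be filled left to right:
    //    auto it = impl.major_begin(i);
    //    for(auto const& e: sorted_entries)      //hypothetical container of (index, value) pairs
    //        it = impl.set_element(it, e.first, e.second);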
    major_iterator set_element(major_iterator pos, size_type index, value_type value){
        size_type major_index = pos.major_index();
        size_type line_pos = pos - major_begin(major_index);
        REMORA_RANGE_CHECK(major_index < major_size());
        REMORA_RANGE_CHECK(size_type(pos - major_begin(major_index)) <= major_nnz(major_index));
        REMORA_RANGE_CHECK(pos == major_end(major_index) || pos.index() >= index);

        //shortcut: the element already exists, just overwrite it
        if (pos != major_end(major_index) && pos.index() == index){
            *pos = value;
            return pos + 1;
        }
        //position of the new element in the value/index arrays
        std::ptrdiff_t arrayPos = line_pos + m_storage.major_indices_begin[major_index];

        //make sure the line can hold one more element
        if (major_capacity(major_index) == major_nnz(major_index))
            major_reserve(major_index,std::max<size_type>(2*major_capacity(major_index),5));

        //move the elements right of the insertion point one slot to the right
        std::copy_backward(
            m_storage.values + arrayPos, m_storage.values + m_storage.major_indices_end[major_index],
            m_storage.values + m_storage.major_indices_end[major_index] + 1
        );
        std::copy_backward(
            m_storage.indices + arrayPos, m_storage.indices + m_storage.major_indices_end[major_index],
            m_storage.indices + m_storage.major_indices_end[major_index] + 1
        );
        //insert the new element
        m_storage.values[arrayPos] = value;
        m_storage.indices[arrayPos] = index;
        ++m_storage.major_indices_end[major_index];

        //return an iterator pointing behind the written element
        return major_begin(major_index) + (line_pos + 1);
    }
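
    //clear_range removes the stored elements [start, end) of one major line by copying the
    //elements behind the range to the left and shrinking major_indices_end; the reserved
    //capacity of the line stays unchanged. The returned iterator points at the first element
    //behind the removed range.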
    major_iterator clear_range(major_iterator start, major_iterator end){
        REMORA_RANGE_CHECK(start.major_index() == end.major_index());
        size_type major_index = start.major_index();
        size_type range_size = end - start;
        size_type range_start = start - major_begin(major_index);
        size_type range_end = range_start + range_size;

        //pointers to the beginning of the line
        auto values = m_storage.values + m_storage.major_indices_begin[major_index];
        auto indices = m_storage.indices + m_storage.major_indices_begin[major_index];

        //compact the line by moving the remaining elements to the left
        std::copy(values + range_end, values + major_nnz(major_index), values + range_start);
        std::copy(indices + range_end, indices + major_nnz(major_index), indices + range_start);
        m_storage.major_indices_end[major_index] -= range_size;

        return major_begin(major_index) + range_start;
    }
    major_iterator clear_element(major_iterator elem){
        REMORA_RANGE_CHECK(elem != major_end(elem.major_index()));
        return clear_range(elem,elem + 1);
    }
    template<class Archive>
    void serialize(Archive& ar, const unsigned int /*version*/){
        ar & boost::serialization::make_nvp("manager", m_manager);
        ar & boost::serialization::make_nvp("minor_size", m_minor_size);
        //the raw pointers in m_storage refer to the old arrays, so after loading a fresh
        //view has to be requested from the manager
        if(Archive::is_loading::value)
            m_storage = m_manager.reserve(0);
    }
private:
    StorageManager m_manager;
    storage_type m_storage;
    size_type m_minor_size;
};
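
///\brief Proxy to a contiguous range [start, end) of major lines of a compressed matrix.
///
/// All storage level operations are forwarded to the underlying matrix with major indices
/// offset by start. For a row-major matrix the proxy models a block of consecutive rows,
/// for a column-major matrix a block of consecutive columns.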
template<class M, class Orientation>
class compressed_matrix_proxy: public matrix_expression<compressed_matrix_proxy<M, Orientation>, cpu_tag>{
private:
    template<class, class> friend class compressed_matrix_proxy;
public:
    typedef typename M::size_type size_type;
    typedef typename M::value_type value_type;
    typedef typename M::const_reference const_reference;
    typedef typename remora::reference<M>::type reference;

    typedef compressed_matrix_proxy<M const, Orientation> const_closure_type;
    typedef compressed_matrix_proxy<M, Orientation> closure_type;
    typedef typename remora::storage<M>::type storage_type;
    typedef typename M::const_storage_type const_storage_type;
    typedef elementwise<sparse_tag> evaluation_category;
    typedef Orientation orientation;
    ///\brief Create a proxy spanning all major lines of m
    compressed_matrix_proxy(M& m): m_matrix(&m), m_major_start(0){
        m_major_end = M::orientation::index_M(m.size1(),m.size2());
    }

    ///\brief Create a proxy for the major lines [start, end) of m
    compressed_matrix_proxy(M& m, size_type start, size_type end)
    : m_matrix(&m), m_major_start(start), m_major_end(end){}

    ///\brief Conversion from a proxy of the non-const matrix
    compressed_matrix_proxy(compressed_matrix_proxy<typename std::remove_const<M>::type, Orientation> const& proxy)
    : m_matrix(proxy.m_matrix), m_major_start(proxy.m_major_start), m_major_end(proxy.m_major_end){}
    size_type start()const{
        return m_major_start;
    }
    size_type end()const{
        return m_major_end;
    }
    template<class E>
    compressed_matrix_proxy& operator=(matrix_expression<E, cpu_tag> const& e){
        return assign(*this, typename matrix_temporary<E>::type(e));
    }
    compressed_matrix_proxy& operator=(compressed_matrix_proxy const& e){
        return assign(*this, typename matrix_temporary<M>::type(e));
    }
    size_type size1()const{
        return orientation::index_M(m_major_end - m_major_start, minor_size(*m_matrix));
    }
    size_type size2()const{
        return orientation::index_m(m_major_end - m_major_start, minor_size(*m_matrix));
    }
    size_type major_capacity(size_type i)const{
        return m_matrix->major_capacity(m_major_start + i);
    }
    size_type major_nnz(size_type i)const{
        return m_matrix->major_nnz(m_major_start + i);
    }
    storage_type raw_storage()const{
        return m_matrix->raw_storage();
    }
    typename device_traits<cpu_tag>::queue_type& queue()const{
        return m_matrix->queue();
    }
    void major_reserve(size_type i, size_type non_zeros, bool exact_size = false){
        m_matrix->major_reserve(m_major_start + i, non_zeros, exact_size);
    }
    typedef typename major_iterator<M>::type major_iterator;
    typedef major_iterator const_major_iterator;

    major_iterator major_begin(size_type i)const{
        return m_matrix->major_begin(m_major_start + i);
    }
    major_iterator major_end(size_type i)const{
        return m_matrix->major_end(m_major_start + i);
    }
    major_iterator set_element(major_iterator pos, size_type index, value_type value){
        return m_matrix->set_element(pos, index, value);
    }
    major_iterator clear_range(major_iterator start, major_iterator end){
        return m_matrix->clear_range(start,end);
    }
    void clear(){
        if(m_major_start == 0 && m_major_end == major_size(*m_matrix))
            m_matrix->clear();
        else for(size_type i = m_major_start; i != m_major_end; ++i)
            m_matrix->clear_range(m_matrix->major_begin(i), m_matrix->major_end(i));
    }
private:
    M* m_matrix;
    size_type m_major_start;
    size_type m_major_end;
};
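
///\brief Proxy treating a single row of a compressed matrix as a sparse vector.
///
/// All operations are forwarded to the stored matrix closure with the row index fixed;
/// begin() and end() iterate only over the nonzeros stored in that row.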
template<class M>
class compressed_matrix_row: public vector_expression<compressed_matrix_row<M>, cpu_tag>{
private:
    template<class> friend class compressed_matrix_row;
public:
    typedef typename closure<M>::type matrix_type;
    typedef typename M::size_type size_type;
    typedef typename M::value_type value_type;
    typedef typename M::const_reference const_reference;
    typedef typename remora::reference<M>::type reference;

    typedef compressed_matrix_row<M const> const_closure_type;
    typedef compressed_matrix_row closure_type;
    typedef typename remora::storage<M>::type::template row_storage<row_major>::type storage_type;
    typedef typename M::const_storage_type::template row_storage<row_major>::type const_storage_type;
    typedef elementwise<sparse_tag> evaluation_category;
    compressed_matrix_row(matrix_type const& m, size_type i): m_matrix(m), m_row(i){}

    ///\brief Conversion from a row proxy of the non-const matrix
    template<class M2>
    compressed_matrix_row(compressed_matrix_row<M2> const& ref)
    : m_matrix(ref.m_matrix), m_row(ref.m_row){}
    template<class E>
    compressed_matrix_row& operator=(vector_expression<E, cpu_tag> const& e){
        return assign(*this, typename vector_temporary<E>::type(e));
    }
    compressed_matrix_row& operator=(compressed_matrix_row const& e){
        return assign(*this, typename vector_temporary<M>::type(e));
    }
    storage_type raw_storage()const{
        return m_matrix.raw_storage().row(m_row, row_major());
    }
    size_type size()const{
        return m_matrix.size2();
    }
    ///\brief Number of nonzero elements stored in the row
    size_type nnz()const{
        return m_matrix.major_nnz(m_row);
    }
    ///\brief Number of nonzero elements the row can hold without reallocation
    size_type nnz_capacity(){
        return m_matrix.major_capacity(m_row);
    }
    void reserve(size_type non_zeros){
        m_matrix.major_reserve(m_row, non_zeros);
    }
    typename device_traits<cpu_tag>::queue_type& queue(){
        return device_traits<cpu_tag>::default_queue();
    }
    typedef typename major_iterator<matrix_type>::type iterator;
    typedef iterator const_iterator;

    iterator begin()const{
        return m_matrix.major_begin(m_row);
    }
    iterator end()const{
        return m_matrix.major_end(m_row);
    }
    iterator set_element(iterator pos, size_type index, value_type value){
        return m_matrix.set_element(pos,index,value);
    }
    iterator clear_range(iterator start, iterator end){
        return m_matrix.clear_range(start,end);
    }
    iterator clear_element(iterator pos){
        return m_matrix.clear_element(pos);
    }
    void clear(){
        m_matrix.clear_range(begin(),end());
    }
private:
    matrix_type m_matrix;
    size_type m_row;
};