From f91fea99362b2365620388207b84e36070728359 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 24 May 2021 21:57:50 +0200 Subject: [PATCH 1/9] rebase subtensor --- IDEs/qtcreator/include/tensor/tensor.pri | 60 +- IDEs/qtcreator/test/test_tensor.pro | 50 +- IDEs/qtcreator/tests.pri | 2 +- .../tensor/slice_detail/type_traits_slice.hpp | 50 ++ include/boost/numeric/ublas/tensor/span.hpp | 251 ++++++ .../boost/numeric/ublas/tensor/strides.hpp | 99 +++ .../boost/numeric/ublas/tensor/subtensor.hpp | 742 ++++++++++++++++++ .../ublas/tensor/subtensor_utility.hpp | 210 +++++ test/tensor/test_span.cpp | 258 ++++++ test/tensor/test_subtensor.cpp | 575 ++++++++++++++ test/tensor/test_subtensor_utility.cpp | 394 ++++++++++ 11 files changed, 2592 insertions(+), 99 deletions(-) create mode 100644 include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp create mode 100644 include/boost/numeric/ublas/tensor/span.hpp create mode 100644 include/boost/numeric/ublas/tensor/strides.hpp create mode 100644 include/boost/numeric/ublas/tensor/subtensor.hpp create mode 100644 include/boost/numeric/ublas/tensor/subtensor_utility.hpp create mode 100644 test/tensor/test_span.cpp create mode 100644 test/tensor/test_subtensor.cpp create mode 100644 test/tensor/test_subtensor_utility.cpp diff --git a/IDEs/qtcreator/include/tensor/tensor.pri b/IDEs/qtcreator/include/tensor/tensor.pri index 112376c11..87b33ac6c 100644 --- a/IDEs/qtcreator/include/tensor/tensor.pri +++ b/IDEs/qtcreator/include/tensor/tensor.pri @@ -1,55 +1,5 @@ -HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/algorithms.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression_evaluation.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/functions.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/index.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/index_functions.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/layout.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multi_index.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multi_index_utility.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multiplication.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_arithmetic.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_comparison.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/ostream.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tags.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/concepts.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/type_traits.hpp - - -HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/storage_traits.hpp - -HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static_size.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_base.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_functions.hpp - - -HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_core.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp \ - 
$${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp - - -HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/inner_prod.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/init.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/outer_prod.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/trans.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/norm.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/imag.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/real.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/conj.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/reshape.hpp +HEADERS += $${INCLUDE_DIR}/boost/numeric/ublas/tensor/*.hpp +HEADERS += $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/*.hpp +HEADERS += $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/*.hpp +HEADERS += $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/*.hpp +HEADERS += $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/*.hpp diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 8deee2f99..8de6aba84 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -18,18 +18,10 @@ clang: QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp=libiomp5 gcc:QMAKE_CXXFLAGS_DEBUG = -g clang: QMAKE_CXXFLAGS_DEBUG =-g - #QMAKE_CXXFLAGS += --coverage BOOST_ROOT=../../../../../.. 
-#exists( $$BOOST_ROOT/boost-build.jam ) { -# message("Boost installed.") -# INCLUDEPATH += $${BOOST_ROOT}/libs/numeric/ublas/include -# LIBS += -L$${BOOST_ROOT}/stage/lib -lgomp -# QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib -#} - QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib INCLUDEPATH+=$$BOOST_ROOT/libs/numeric/ublas/include LIBS+=-L$${BOOST_ROOT}/stage/lib -lboost_unit_test_framework -lgomp @@ -41,39 +33,11 @@ TEST_DIR = ../../../test/tensor include(../include/tensor/tensor.pri) -HEADERS += \ - $${TEST_DIR}/utility.hpp +HEADERS += $${TEST_DIR}/utility.hpp -SOURCES += \ - $${TEST_DIR}/test_algorithms.cpp \ - $${TEST_DIR}/test_einstein_notation.cpp \ - $${TEST_DIR}/test_expression.cpp \ - $${TEST_DIR}/test_expression_evaluation.cpp \ - $${TEST_DIR}/test_extents_dynamic.cpp \ - $${TEST_DIR}/test_extents_dynamic_rank_static.cpp \ - $${TEST_DIR}/test_fixed_rank_expression_evaluation.cpp \ - $${TEST_DIR}/test_fixed_rank_extents.cpp \ - $${TEST_DIR}/test_fixed_rank_functions.cpp \ - $${TEST_DIR}/test_fixed_rank_operators_arithmetic.cpp \ - $${TEST_DIR}/test_fixed_rank_operators_comparison.cpp \ - $${TEST_DIR}/test_fixed_rank_strides.cpp \ - $${TEST_DIR}/test_fixed_rank_tensor.cpp \ - $${TEST_DIR}/test_fixed_rank_tensor_matrix_vector.cpp \ - $${TEST_DIR}/test_functions.cpp \ - $${TEST_DIR}/test_multi_index.cpp \ - $${TEST_DIR}/test_multi_index_utility.cpp \ - $${TEST_DIR}/test_multiplication.cpp \ - $${TEST_DIR}/test_operators_arithmetic.cpp \ - $${TEST_DIR}/test_operators_comparison.cpp \ - $${TEST_DIR}/test_static_expression_evaluation.cpp \ - $${TEST_DIR}/test_static_extents.cpp \ - # $${TEST_DIR}/test_static_functions.cpp \ - $${TEST_DIR}/test_static_operators_arithmetic.cpp \ - $${TEST_DIR}/test_static_operators_comparison.cpp \ - $${TEST_DIR}/test_static_strides.cpp \ - $${TEST_DIR}/test_static_tensor.cpp \ - $${TEST_DIR}/test_static_tensor_matrix_vector.cpp \ - $${TEST_DIR}/test_strides.cpp \ - $${TEST_DIR}/test_tensor.cpp \ - $${TEST_DIR}/test_tensor_matrix_vector.cpp \ - $${TEST_DIR}/test_extents_functions.cpp +SOURCES += $${TEST_DIR}/algorithm/test_*.cpp +SOURCES += $${TEST_DIR}/extents/test_*.cpp +SOURCES += $${TEST_DIR}/functions/test_*.cpp +SOURCES += $${TEST_DIR}/multiplicatoin/test_*.cpp +SOURCES += $${TEST_DIR}/tensor/test_*.cpp +SOURCES += $${TEST_DIR}/test_*.cpp diff --git a/IDEs/qtcreator/tests.pri b/IDEs/qtcreator/tests.pri index 04e131f59..33721d551 100644 --- a/IDEs/qtcreator/tests.pri +++ b/IDEs/qtcreator/tests.pri @@ -33,7 +33,7 @@ SUBDIRS += \ # test_triangular \ # triangular_access \ # triangular_layout \ - # test_tensor +# test_tensor #begin_end.file = test/begin_end.pro #comp_mat_erase.file = test/comp_mat_erase.pro diff --git a/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp b/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp new file mode 100644 index 000000000..54f39b792 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp @@ -0,0 +1,50 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google +// + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ + +#include +#include + +namespace boost::numeric::ublas::experimental { + + template + struct basic_slice; + + template + struct is_slice : std::false_type{}; + + template + inline static constexpr auto const is_slice_v = is_slice::value; + +} // namespace boost::numeric::ublas::span + +namespace boost::numeric::ublas::experimental { + + template + struct is_slice< basic_slice > : std::true_type{}; + +} // namespace boost::numeric::ublas::span + +namespace boost::numeric::ublas{ + + template + struct is_dynamic< experimental::basic_slice > : std::true_type{}; + + template + struct is_static< experimental::basic_slice > : std::true_type{}; + +} // namespace boost::numeric::ublas + + +#endif diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp new file mode 100644 index 000000000..f0875b60c --- /dev/null +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -0,0 +1,251 @@ +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. +// + + +#ifndef _BOOST_UBLAS_TENSOR_SPAN_ +#define _BOOST_UBLAS_TENSOR_SPAN_ + +#include +#include +#include +#include + +namespace boost { +namespace numeric { +namespace ublas { +namespace tag { + +struct sliced {}; +struct strided {}; + +} +} +} +} + + +namespace boost::numeric::ublas { + +/** \class span + * \ingroup Core_Module + * + * \brief Selection operator class to initialize stl::multi_subarray + * + * This class is used to generate stl::multi_subarray from stl::multi_array and to + * work on views. + * \note zero based indexing is used. + * + */ + + + +//using offsets = std::vector; + +template +class span; + + +static constexpr inline std::size_t end = std::numeric_limits::max(); + +template<> +class span +{ +public: + using span_tag = tag::strided; + using value_type = std::size_t; + + // covers the complete range of one dimension + // e.g. a(:) + constexpr explicit span() + : first_{} + , last_ {} + , step_ {} + , size_ {} + {} + + + // covers a linear range of one dimension + // e.g. a(1:3:n) + span(value_type f, value_type s, value_type l) + : first_(f) + , last_ (l) + , step_ (s) + { + if(f == l){ + last_ = l; + size_ = value_type(1); + } + else { + if(s == 0 && f != l) + throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); + + if(f > l) + throw std::runtime_error("Error in span::span: last_ is smaller than first"); + + last_ = l - ((l-f)%s); + size_ = (last_-first_)/s+value_type(1); + } + } + + // covers only one index of one dimension + // e.g. 
a(1) or a(end) + span(value_type n) + : span(n,1,n) + { + } + + span(span const& other) + : first_(other.first_) + , last_ (other.last_ ) + , step_ (other.step_ ) + , size_ (other.size_ ) + { + } + + span& operator=(span const& other) + { + first_ = other.first_; + last_ = other.last_ ; + step_ = other.step_ ; + size_ = other.size_ ; + return *this; + } + + inline auto first() const {return first_; } + inline auto last () const {return last_ ; } + inline auto step () const {return step_ ; } + inline auto size () const {return size_ ; } + + ~span() = default; + + inline value_type operator[] (std::size_t idx) const + { + return first_ + idx * step_; + } + + inline span operator()(const span &rhs) const + { + auto const& lhs = *this; + return span( + rhs.first_*lhs.step_ + lhs.first_, + lhs.step_ *rhs.step_, + rhs.last_ *lhs.step_ + lhs.first_ ); + } + +protected: + + value_type first_, last_ , step_, size_; +}; + +using strided_span = span; + +} // namespace + + +///////////// + +namespace boost::numeric::ublas { + +template<> +class span : + private span +{ + using super_type = span; +public: + using span_tag = tag::sliced; + using value_type = typename super_type::value_type; + constexpr explicit span() + : super_type() + { + } + + span(value_type f, value_type l) + : super_type(f, value_type(1), l ) + { + } + + span(value_type n) + : super_type(n) + { + } + + span(span const& other) + : super_type(other) + { + } + + inline span& operator=(const span &other) + { + super_type::operator=(other); + return *this; + } + + ~span() = default; + + inline value_type operator[] (std::size_t idx) const + { + return super_type::operator [](idx); + } + + inline auto first() const {return super_type::first(); } + inline auto last () const {return super_type::last (); } + inline auto step () const {return super_type::step (); } + inline auto size () const {return super_type::size (); } + + inline span operator()(const span &rhs) const + { + auto const& lhs = *this; + return span( rhs.first_ + lhs.first_, rhs.last_ + lhs.first_ ); + } +}; + +using sliced_span = span; + + +template +inline auto ran(unsigned_type f, unsigned_type l) +{ + return sliced_span(f,l); +} + +template +inline auto ran(unsigned_type f, unsigned_type s, unsigned_type l) +{ + return strided_span(f,s,l); +} + +} // namespace + + +template +std::ostream& operator<< (std::ostream& out, boost::numeric::ublas::span const& s) +{ + return out << "[" << s.first() << ":" << s.step() << ":" << s.last() << "]" << std::endl; +} + +template +inline bool operator==( + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) +{ + return lhs.first() == rhs.first() && lhs.last() == rhs.last() && lhs.step() == rhs.step(); +} + + +template +inline bool operator!=( + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) +{ + return lhs.first() != rhs.first() || lhs.last() != rhs.last() || lhs.step() != rhs.step(); +} + +#endif // FHG_range_H diff --git a/include/boost/numeric/ublas/tensor/strides.hpp b/include/boost/numeric/ublas/tensor/strides.hpp new file mode 100644 index 000000000..0dac93bb7 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/strides.hpp @@ -0,0 +1,99 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// The authors gratefully acknowledge the support of
+// Google
+//
+/// \file strides.hpp Definition for the basic_strides template class
+
+#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_STRIDES_HPP_
+#define _BOOST_NUMERIC_UBLAS_TENSOR_STRIDES_HPP_
+
+#include
+#include
+#include
+
+namespace boost::numeric::ublas{
+
+  template<class LStrides, class RStrides,
+    std::enable_if_t< is_strides_v<LStrides> && is_strides_v<RStrides>
+    , int> = 0
+  >
+  [[nodiscard]] inline
+  constexpr bool operator==(LStrides const& lhs, RStrides const& rhs) noexcept{
+    static_assert( std::is_same_v<typename LStrides::value_type, typename RStrides::value_type>,
+      "boost::numeric::ublas::operator==(LStrides,RStrides) : LHS value type should be same as RHS value type");
+
+    return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin());
+  }
+
+  template<class LStrides, class RStrides,
+    std::enable_if_t< is_strides_v<LStrides> && is_strides_v<RStrides>
+    , int> = 0
+  >
+  [[nodiscard]] inline
+  constexpr bool operator!=(LStrides const& lhs, RStrides const& rhs) noexcept{
+    static_assert( std::is_same_v<typename LStrides::value_type, typename RStrides::value_type>,
+      "boost::numeric::ublas::operator!=(LStrides,RStrides) : LHS value type should be same as RHS value type");
+    return !( lhs == rhs );
+  }
+
+} // namespace boost::numeric::ublas
+
+
+namespace boost::numeric::ublas::detail {
+
+  /** @brief Returns relative memory index with respect to a multi-index
+   *
+   * @code auto j = access(std::vector{3,4,5}, strides{shape{4,2,3},first_order}); @endcode
+   *
+   * @param[in] i multi-index of length p
+   * @param[in] w stride vector of length p
+   * @returns relative memory location depending on \c i and \c w
+   */
+  template<class size_type, class Stride>
+  [[nodiscard]] inline
+  constexpr auto access(std::vector<size_type> const& i, Stride const& w)
+  {
+    static_assert( is_strides_v<Stride>,
+      "boost::numeric::ublas::detail::access() : invalid type, type should be a strides");
+
+    const auto p = i.size();
+    size_type sum = 0u;
+    for(auto r = 0u; r < p; ++r)
+      sum += i[r]*w[r];
+    return sum;
+  }
+
+  /** @brief Returns relative memory index with respect to a multi-index
+   *
+   * @code auto j = access(0, strides{shape{4,2,3},first_order}, 2,3,4); @endcode
+   *
+   * @param[in] i first element of the partial multi-index
+   * @param[in] is the following elements of the partial multi-index
+   * @param[in] sum the current relative memory index
+   * @returns relative memory location depending on \c i and \c w
+   */
+  template<std::size_t r, class Stride, class ... size_types>
+  [[nodiscard]]
+  constexpr auto access(std::size_t sum, Stride const& w, std::size_t i, size_types ... is)
+  {
+    static_assert( is_strides_v<Stride>,
+      "boost::numeric::ublas::detail::access() : invalid type, type should be a strides");
+    sum += i*w[r];
+    if constexpr (sizeof...(is) == 0)
+      return sum;
+    else
+      return detail::access<r+1>(sum,w,std::forward<size_types>(is)...);
+  }
+
+} // namespace boost::numeric::ublas::detail
+
+#endif
diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp
new file mode 100644
index 000000000..d03130610
--- /dev/null
+++ b/include/boost/numeric/ublas/tensor/subtensor.hpp
@@ -0,0 +1,742 @@
+// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// The authors gratefully acknowledge the support of
+// Fraunhofer and Google in producing this work
+// which started as a Google Summer of Code project.
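+//
+// Rough usage sketch for the view defined in this file, mirroring the checks
+// in test_subtensor.cpp; the template arguments of tensor_type/subtensor_type
+// are assumptions here (they are spelled out in the tests) and span is the
+// sliced span from span.hpp:
+//
+// @code
+// auto A    = tensor_type{4,3,5};                                      // dense tensor with extents 4x3x5
+// auto Asub = subtensor_type( A, span(1,2), span(1,end), span(2,4) );  // view with extents 2x2x3, no copy
+// assert( Asub.size() == 12 );                                         // 2*2*3 elements
+// assert( Asub.data() == A.data() + 1*A.strides().at(0)                // data pointer is offset by the
+//                                 + 1*A.strides().at(1)                // first index of each span
+//                                 + 2*A.strides().at(2) );             // times the tensor strides
+// @endcode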
+// + + +/// \file subtensor.hpp Definition for the tensor template class + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ + + + + +#include +#include +#include +#include +#include + + +namespace boost::numeric::ublas { + +template +class dynamic_tensor; + +template +class matrix; + +template +class vector; + + + + + +/** @brief A view of a dense tensor of values of type \c T. + * + * @tparam T type of the objects stored in the tensor (like int, double, complex,...) + * @tparam F + * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. \c and \c std::vector can also be used +*/ +template +class subtensor; + + +/** @brief A sliced view of a dense tensor of values of type \c T. + * + * For a \f$n\f$-dimensional tensor \f$v\f$ and \f$0\leq i < n\f$ every element \f$v_i\f$ is mapped + * to the \f$i\f$-th element of the container. A storage type \c A can be specified which defaults to \c unbounded_array. + * Elements are constructed by \c A, which need not initialise their value. + * + * @tparam T type of the objects stored in the tensor (like int, double, complex,...) + * @tparam F type of the layout which can be either + * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. \c and \c std::vector can also be used + */ +template +class subtensor > + : public detail::tensor_expression< + subtensor> , + subtensor> > +{ + + static_assert( std::is_same::value || std::is_same::value, + "boost::numeric::tensor template class only supports first- or last-order storage formats."); + + using tensor_type = dynamic_tensor; + using self_type = subtensor; +public: + + using domain_tag = tag::sliced; + + using span_type = span; + + template + using tensor_expression_type = detail::tensor_expression; + + template + using matrix_expression_type = matrix_expression; + + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + +// static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); + + using array_type = typename tensor_type::array_type; + using layout_type = typename tensor_type::layout_type; + + using size_type = typename tensor_type::size_type; + using difference_type = typename tensor_type::difference_type; + using value_type = typename tensor_type::value_type; + + using reference = typename tensor_type::reference; + using const_reference = typename tensor_type::const_reference; + + using pointer = typename tensor_type::pointer; + using const_pointer = typename tensor_type::const_pointer; + +// using iterator = typename array_type::iterator; +// using const_iterator = typename array_type::const_iterator; + +// using reverse_iterator = typename array_type::reverse_iterator; +// using const_reverse_iterator = typename array_type::const_reverse_iterator; + + using tensor_temporary_type = self_type; + using storage_category = dense_tag; + + using strides_type = basic_strides; + using extents_type = basic_extents; + + using matrix_type = matrix; + using vector_type = vector; + + + + /** @brief Deleted constructor of a subtensor */ + subtensor () = delete; + + /** @brief Constructs a tensor view from a tensor without any range. + * + */ + BOOST_UBLAS_INLINE + subtensor (tensor_type& t) + : super_type () + , spans_ () + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_ (t.strides()) + , data_ (t.data()) + { + } + + template + subtensor(tensor_type& t, span_types&& ... 
spans) + : super_type () + , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) + , extents_ (detail::compute_extents(spans_)) + , strides_ (extents_) + , span_strides_ (detail::compute_span_strides(t.strides(),spans_)) + , data_ {t.data() + detail::compute_offset(t.strides(), spans_)} + { +// if( m == nullptr) +// throw std::length_error("Error in tensor_view::tensor_view : multi_array_type is nullptr."); +// if( t == nullptr) +// throw std::length_error("Error in tensor_view::tensor_view : tensor_type is nullptr."); + } + + + /** @brief Constructs a tensor view from a tensor without any range. + * + * @note can be regarded as a pointer to a tensor + */ + explicit + subtensor (tensor_type const& t) + : super_type () + , spans_() + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_(t.strides()) + , data_ (t.data()) + { + } + +#if 0 + /** @brief Constructs a tensor with a \c shape and initiates it with one-dimensional data + * + * @code tensor A{extents{4,2,3}, array }; @endcode + * + * + * @param s initial tensor dimension extents + * @param a container of \c array_type that is copied according to the storage layout + */ + BOOST_UBLAS_INLINE + tensor (extents_type const& s, const array_type &a) + : tensor_expression_type() //tensor_container() + , extents_ (s) + , strides_ (extents_) + , data_ (a) + { + if(this->extents_.product() != this->data_.size()) + throw std::runtime_error("Error in boost::numeric::ublas::tensor: size of provided data and specified extents do not match."); + } + + + + /** @brief Constructs a tensor using a shape tuple and initiates it with a value. + * + * @code tensor A{extents{4,2,3}, 1 }; @endcode + * + * @param e initial tensor dimension extents + * @param i initial value of all elements of type \c value_type + */ + BOOST_UBLAS_INLINE + tensor (extents_type const& e, const value_type &i) + : tensor_expression_type() //tensor_container () + , extents_ (e) + , strides_ (extents_) + , data_ (extents_.product(), i) + {} + + + + /** @brief Constructs a tensor from another tensor + * + * @param v tensor to be copied. + */ + BOOST_UBLAS_INLINE + tensor (const tensor &v) + : tensor_expression_type() + , extents_ (v.extents_) + , strides_ (v.strides_) + , data_ (v.data_ ) + {} + + + + /** @brief Constructs a tensor from another tensor + * + * @param v tensor to be moved. + */ + BOOST_UBLAS_INLINE + tensor (tensor &&v) + : tensor_expression_type() //tensor_container () + , extents_ (std::move(v.extents_)) + , strides_ (std::move(v.strides_)) + , data_ (std::move(v.data_ )) + {} + + + /** @brief Constructs a tensor with a matrix + * + * \note Initially the tensor will be two-dimensional. + * + * @param v matrix to be copied. + */ + BOOST_UBLAS_INLINE + tensor (const matrix_type &v) + : tensor_expression_type() + , extents_ () + , strides_ () + , data_ (v.data()) + { + if(!data_.empty()){ + extents_ = extents_type{v.size1(),v.size2()}; + strides_ = strides_type(extents_); + } + } + + /** @brief Constructs a tensor with a matrix + * + * \note Initially the tensor will be two-dimensional. + * + * @param v matrix to be moved. 
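+     *
+     * A possible call, assuming the ublas matrix alias used above
+     * (the sizes and the fill value are arbitrary):
+     *
+     * @code tensor A = matrix_type(3,4,1.0); @endcode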
+ */ + BOOST_UBLAS_INLINE + tensor (matrix_type &&v) + : tensor_expression_type() + , extents_ {} + , strides_ {} + , data_ {} + { + if(v.size1()*v.size2() != 0){ + extents_ = extents_type{v.size1(),v.size2()}; + strides_ = strides_type(extents_); + data_ = std::move(v.data()); + } + } + + /** @brief Constructs a tensor using a \c vector + * + * @note It is assumed that vector is column vector + * @note Initially the tensor will be one-dimensional. + * + * @param v vector to be copied. + */ + BOOST_UBLAS_INLINE + tensor (const vector_type &v) + : tensor_expression_type() + , extents_ () + , strides_ () + , data_ (v.data()) + { + if(!data_.empty()){ + extents_ = extents_type{data_.size(),1}; + strides_ = strides_type(extents_); + } + } + + /** @brief Constructs a tensor using a \c vector + * + * @param v vector to be moved. + */ + BOOST_UBLAS_INLINE + tensor (vector_type &&v) + : tensor_expression_type() + , extents_ {} + , strides_ {} + , data_ {} + { + if(v.size() != 0){ + extents_ = extents_type{v.size(),1}; + strides_ = strides_type(extents_); + data_ = std::move(v.data()); + } + } + + + /** @brief Constructs a tensor with another tensor with a different layout + * + * @param other tensor with a different layout to be copied. + */ + BOOST_UBLAS_INLINE + template + tensor (const tensor &other) + : tensor_expression_type () + , extents_ (other.extents()) + , strides_ (other.extents()) + , data_ (other.extents().product()) + { + copy(this->rank(), this->extents().data(), + this->data(), this->strides().data(), + other.data(), other.strides().data()); + } + + /** @brief Constructs a tensor with an tensor expression + * + * @code tensor A = B + 3 * C; @endcode + * + * @note type must be specified of tensor must be specified. + * @note dimension extents are extracted from tensors within the expression. + * + * @param expr tensor expression + */ + BOOST_UBLAS_INLINE + template + tensor (const tensor_expression_type &expr) + : tensor_expression_type () + , extents_ ( detail::retrieve_extents(expr) ) + , strides_ ( extents_ ) + , data_ ( extents_.product() ) + { + static_assert( detail::has_tensor_types>::value, + "Error in boost::numeric::ublas::tensor: expression does not contain a tensor. cannot retrieve shape."); + detail::eval( *this, expr ); + } + + /** @brief Constructs a tensor with a matrix expression + * + * @code tensor A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + BOOST_UBLAS_INLINE + template + tensor (const matrix_expression_type &expr) + : tensor( matrix_type ( expr ) ) + { + } + + /** @brief Constructs a tensor with a vector expression + * + * @code tensor A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + BOOST_UBLAS_INLINE + template + tensor (const vector_expression_type &expr) + : tensor( vector_type ( expr ) ) + { + } + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor. + * + * @param expr expression that is evaluated. 
+ */ + BOOST_UBLAS_INLINE + template + tensor &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + tensor& operator=(tensor other) + { + swap (*this, other); + return *this; + } + + tensor& operator=(const_reference v) + { + std::fill(this->begin(), this->end(), v); + return *this; + } +#endif + + + /** @brief Returns true if the subtensor is empty (\c size==0) */ + BOOST_UBLAS_INLINE + bool empty () const { + return this->size() == size_type(0); + } + + + /** @brief Returns the size of the subtensor */ + BOOST_UBLAS_INLINE + size_type size () const { + return product(this->extents_); + } + + /** @brief Returns the size of the subtensor */ + BOOST_UBLAS_INLINE + size_type size (size_type r) const { + return this->extents_.at(r); + } + + /** @brief Returns the number of dimensions/modes of the subtensor */ + BOOST_UBLAS_INLINE + size_type rank () const { + return this->extents_.size(); + } + + /** @brief Returns the number of dimensions/modes of the subtensor */ + BOOST_UBLAS_INLINE + size_type order () const { + return this->extents_.size(); + } + + /** @brief Returns the strides of the subtensor */ + BOOST_UBLAS_INLINE + auto const& strides () const { + return this->strides_; + } + + /** @brief Returns the span strides of the subtensor */ + BOOST_UBLAS_INLINE + auto const& span_strides () const { + return this->span_strides_; + } + + /** @brief Returns the span strides of the subtensor */ + BOOST_UBLAS_INLINE + auto const& spans () const { + return this->spans_; + } + + + /** @brief Returns the extents of the subtensor */ + BOOST_UBLAS_INLINE + auto const& extents () const { + return this->extents_; + } + + + /** @brief Returns a \c const reference to the container. */ + BOOST_UBLAS_INLINE + const_pointer data () const { + return this->data_; + } + + /** @brief Returns a \c const reference to the container. */ + BOOST_UBLAS_INLINE + pointer data () { + return this->data_; + } + + + + +#if 0 + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + const_reference operator [] (size_type i) const { + return this->data_[i]; + } + + /** @brief Element access using a single index. + * + * + * @code A[i] = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + reference operator [] (size_type i) + { + return this->data_[i]; + } + + + /** @brief Element access using a multi-index or single-index. + * + * + * @code auto a = A.at(i,j,k); @endcode or + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + BOOST_UBLAS_INLINE + const_reference at (size_type i, size_types ... is) const { + if constexpr (sizeof...(is) == 0) + return this->data_[i]; + else + return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward(is)...)]; + } + + /** @brief Element access using a multi-index or single-index. + * + * + * @code A.at(i,j,k) = a; @endcode or + * @code A.at(i) = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + BOOST_UBLAS_INLINE + template + reference at (size_type i, size_types ... 
is) { + if constexpr (sizeof...(is) == 0) + return this->data_[i]; + else + return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward(is)...)]; + } + + + + + /** @brief Element access using a single index. + * + * + * @code A(i) = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + const_reference operator()(size_type i) const { + return this->data_[i]; + } + + + /** @brief Element access using a single index. + * + * @code A(i) = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + reference operator()(size_type i){ + return this->data_[i]; + } + + + + + /** @brief Generates a tensor index for tensor contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + BOOST_UBLAS_INLINE + template + decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto N = sizeof...(ps)+1; + if( N != this->rank() ) + throw std::runtime_error("Error in boost::numeric::ublas::operator(): size of provided index_types does not match with the rank."); + + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); + } + + + + + + /** @brief Reshapes the tensor + * + * + * (1) @code A.reshape(extents{m,n,o}); @endcode or + * (2) @code A.reshape(extents{m,n,o},4); @endcode + * + * If the size of this smaller than the specified extents than + * default constructed (1) or specified (2) value is appended. + * + * @note rank of the tensor might also change. + * + * @param e extents with which the tensor is reshaped. + * @param v value which is appended if the tensor is enlarged. + */ + BOOST_UBLAS_INLINE + void reshape (extents_type const& e, value_type v = value_type{}) + { + this->extents_ = e; + this->strides_ = strides_type(this->extents_); + + if(e.product() != this->size()) + this->data_.resize (this->extents_.product(), v); + } + + + friend void swap(tensor& lhs, tensor& rhs) { + std::swap(lhs.data_ , rhs.data_ ); + std::swap(lhs.extents_, rhs.extents_); + std::swap(lhs.strides_, rhs.strides_); + } + + + /// \brief return an iterator on the first element of the tensor + BOOST_UBLAS_INLINE + const_iterator begin () const { + return data_.begin (); + } + + /// \brief return an iterator on the first element of the tensor + BOOST_UBLAS_INLINE + const_iterator cbegin () const { + return data_.cbegin (); + } + + /// \brief return an iterator after the last element of the tensor + BOOST_UBLAS_INLINE + const_iterator end () const { + return data_.end(); + } + + /// \brief return an iterator after the last element of the tensor + BOOST_UBLAS_INLINE + const_iterator cend () const { + return data_.cend (); + } + + /// \brief Return an iterator on the first element of the tensor + BOOST_UBLAS_INLINE + iterator begin () { + return data_.begin(); + } + + /// \brief Return an iterator at the end of the tensor + BOOST_UBLAS_INLINE + iterator end () { + return data_.end(); + } + + /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator rbegin () const { + return data_.rbegin(); + } + + /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. 
end() of normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator crbegin () const { + return data_.crbegin(); + } + + /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator rend () const { + return data_.rend(); + } + + /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator crend () const { + return data_.crend(); + } + + /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) + BOOST_UBLAS_INLINE + reverse_iterator rbegin () { + return data_.rbegin(); + } + + /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) + BOOST_UBLAS_INLINE + reverse_iterator rend () { + return data_.rend(); + } + + +#if 0 + // ------------- + // Serialization + // ------------- + + /// Serialize a tensor into and archive as defined in Boost + /// \param ar Archive object. Can be a flat file, an XML file or any other stream + /// \param file_version Optional file version (not yet used) + template + void serialize(Archive & ar, const unsigned int /* file_version */){ + ar & serialization::make_nvp("data",data_); + } +#endif + +#endif + +private: + + std::vector spans_; + extents_type extents_; + strides_type strides_; + strides_type span_strides_; + pointer data_; +}; + + +} // namespaces + + + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp new file mode 100644 index 000000000..6c42763d0 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -0,0 +1,210 @@ +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which firsted as a Google Summer of Code project. +// + + +/// \file subtensor_utility.hpp Definition for the tensor template class + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ + +#include +#include +#include + +#include +#include +#include + + +namespace boost::numeric::ublas::detail { + + +/*! @brief Computes span strides for a subtensor + * + * span stride v is computed according to: v[i] = w[i]*s[i], where + * w[i] is the i-th stride of the tensor + * s[i] is the step size of the i-th span + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto compute_span_strides(strides_type const& strides, spans_type const& spans) +{ + if(strides.size() != spans.size()) + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::compute_span_strides(): tensor strides.size() != spans.size()"); + + using base_type = typename strides_type::base_type; + auto span_strides = base_type(spans.size()); + + std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), + [](auto w, auto const& s) { return w * s.step(); } ); + + return strides_type( span_strides ); +} + +/*! 
@brief Computes the data pointer offset for a subtensor + * + * offset is computed according to: sum ( f[i]*w[i] ), where + * f[i] is the first element of the i-th span + * w[i] is the i-th stride of the tensor + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto compute_offset(strides_type const& strides, spans_type const& spans) +{ + if(strides.size() != spans.size()) + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); + + using value_type = typename strides_type::value_type; + + return std::inner_product(spans.begin(), spans.end(), strides.begin(), value_type(0), + std::plus(), [](auto const& s, value_type w) {return s.first() * w; } ); +} + + +/*! @brief Computes the extents of the subtensor. + * + * i-th extent is given by span[i].size() + * + * @param[in] spans vector of spans of the subtensor + */ +template +auto compute_extents(spans_type const& spans) +{ + using extents_t = basic_extents; + using base_type = typename extents_t::base_type; + if(spans.empty()) + return extents_t{}; + auto extents = base_type(spans.size()); + std::transform(spans.begin(), spans.end(), extents.begin(), [](auto const& s) { return s.size(); } ); + return extents_t( extents ); +} + + +/*! @brief Auxiliary function for subtensor which possibly transforms a span instance + * + * transform_span(span() ,4) -> span(0,3) + * transform_span(span(1,1) ,4) -> span(1,1) + * transform_span(span(1,3) ,4) -> span(1,3) + * transform_span(span(2,end),4) -> span(2,3) + * transform_span(span(end) ,4) -> span(3,3) + * + * @note span is zero-based indexed. + * + * @param[in] s span that is going to be transformed + * @param[in] extent extent that is maybe used for the tranformation + */ +template +auto transform_span(span const& s, size_type const extent) +{ + using span_type = span; + + size_type first = s.first(); + size_type last = s.last (); + size_type size = s.size (); + + auto const extent0 = extent-1; + + auto constexpr is_sliced = std::is_same::value; + + + if constexpr ( is_sliced ){ + if(size == 0) return span_type(0 , extent0); + else if(first== end) return span_type(extent0 , extent0); + else if(last == end) return span_type(first , extent0); + else return span_type(first , last ); + } + else { + size_type step = s.step (); + if(size == 0) return span_type(0 , size_type(1), extent0); + else if(first== end) return span_type(extent0 , step, extent0); + else if(last == end) return span_type(first , step, extent0); + else return span_type(first , step, last ); + } +} + + +template +void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... spans ); + +template +void transform_spans_impl(basic_extents const& extents, std::array& span_array, span const& s, span_types&& ... spans) +{ + std::get(span_array) = transform_span(s, extents[r]); + static constexpr auto nspans = sizeof...(spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans)...); +} + +template +void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... 
spans ) +{ + static constexpr auto nspans = sizeof...(spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + std::get(span_array) = transform_span(span_type(arg), extents[r]); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans) ... ); + +} + + +/*! @brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. + * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_array(basic_extents const& extents, span_types&& ... spans) +{ + constexpr static auto n = sizeof...(spans); + if(extents.size() != n) + throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); + std::array span_array; + if constexpr (n>0) + transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + return span_array; +} + + +/*! @brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. + * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_vector(basic_extents const& extents, span_types&& ... spans) +{ + auto span_array = generate_span_array(extents,std::forward(spans)...); + return std::vector(span_array.begin(), span_array.end()); +} + + +} // namespace boost::numeric::ublas::detail + + + + + +#endif diff --git a/test/tensor/test_span.cpp b/test/tensor/test_span.cpp new file mode 100644 index 000000000..1b1da2a63 --- /dev/null +++ b/test/tensor/test_span.cpp @@ -0,0 +1,258 @@ +// Copyright (c) 2018 Cem Bassoy +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. 
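+//
+// Rough summary of the strided/sliced span semantics exercised below
+// (illustrative only; the clipping rule is the one implemented in span.hpp):
+//
+// @code
+// strided_span(1,3,7);  // first=1, step=3, last=7, size=3
+// strided_span(1,2,4);  // last is clipped to 3 = 4-((4-1)%2), size=2
+// strided_span(2);      // single index: first=last=2, step=1, size=1
+// ran(1,2,4);           // helper returning strided_span(1,2,4)
+// ran(1,2);             // helper returning sliced_span(1,2), i.e. step=1, size=2
+// @endcode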
+// + +#include +#include +#include + + +BOOST_AUTO_TEST_SUITE( span_testsuite ); + +struct fixture { + using span_type = boost::numeric::ublas::strided_span; + + fixture() : + spans { + span_type{}, // 0 + span_type(0,0,0), // 1 + span_type(0,1,0), // 2 + span_type(0,1,2), // 3 + span_type(1,1,2), // 4 + span_type(0,2,4), // 5 + span_type(1,2,4), // 6 + span_type(1,3,5), // 7 + span_type(1,3,7) // 8 + } + {} + std::vector spans; +}; + + + +BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) +{ + using span_type = boost::numeric::ublas::strided_span; + + BOOST_CHECK_EQUAL (spans[0].first(),0); + BOOST_CHECK_EQUAL (spans[0].step (),0); + BOOST_CHECK_EQUAL (spans[0].last (),0); + BOOST_CHECK_EQUAL (spans[0].size (),0); + + BOOST_CHECK_EQUAL (spans[1].first(),0); + BOOST_CHECK_EQUAL (spans[1].step (),0); + BOOST_CHECK_EQUAL (spans[1].last (),0); + BOOST_CHECK_EQUAL (spans[1].size (),1); + + BOOST_CHECK_EQUAL (spans[2].first(),0); + BOOST_CHECK_EQUAL (spans[2].step (),1); + BOOST_CHECK_EQUAL (spans[2].last (),0); + BOOST_CHECK_EQUAL (spans[2].size (),1); + + BOOST_CHECK_EQUAL (spans[3].first(),0); + BOOST_CHECK_EQUAL (spans[3].step (),1); + BOOST_CHECK_EQUAL (spans[3].last (),2); + BOOST_CHECK_EQUAL (spans[3].size (),3); + + BOOST_CHECK_EQUAL (spans[4].first(),1); + BOOST_CHECK_EQUAL (spans[4].step (),1); + BOOST_CHECK_EQUAL (spans[4].last (),2); + BOOST_CHECK_EQUAL (spans[4].size (),2); + + BOOST_CHECK_EQUAL (spans[5].first(),0); + BOOST_CHECK_EQUAL (spans[5].step (),2); + BOOST_CHECK_EQUAL (spans[5].last (),4); + BOOST_CHECK_EQUAL (spans[5].size (),3); + + BOOST_CHECK_EQUAL (spans[6].first(),1); + BOOST_CHECK_EQUAL (spans[6].step (),2); + BOOST_CHECK_EQUAL (spans[6].last (),3); + BOOST_CHECK_EQUAL (spans[6].size (),2); + + BOOST_CHECK_EQUAL (spans[7].first(),1); + BOOST_CHECK_EQUAL (spans[7].step (),3); + BOOST_CHECK_EQUAL (spans[7].last (),4); + BOOST_CHECK_EQUAL (spans[7].size (),2); + + BOOST_CHECK_EQUAL (spans[8].first(),1); + BOOST_CHECK_EQUAL (spans[8].step (),3); + BOOST_CHECK_EQUAL (spans[8].last (),7); + BOOST_CHECK_EQUAL (spans[8].size (),3); + + + BOOST_CHECK_THROW ( span_type( 1,0,3 ), std::runtime_error ); + BOOST_CHECK_THROW ( span_type( 1,2,0 ), std::runtime_error ); + +} + + + +BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) +{ + using span_type = boost::numeric::ublas::strided_span; + + + BOOST_CHECK_EQUAL (span_type(spans[0]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).step (),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).size (),0); + + BOOST_CHECK_EQUAL (span_type(spans[1]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[1]).step (),0); + BOOST_CHECK_EQUAL (span_type(spans[1]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[1]).size (),1); + + BOOST_CHECK_EQUAL (span_type(spans[2]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[2]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[2]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[2]).size (),1); + + BOOST_CHECK_EQUAL (span_type(spans[3]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[3]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[3]).last (),2); + BOOST_CHECK_EQUAL (span_type(spans[3]).size (),3); + + BOOST_CHECK_EQUAL (span_type(spans[4]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[4]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[4]).last (),2); + BOOST_CHECK_EQUAL (span_type(spans[4]).size (),2); + + + BOOST_CHECK_EQUAL (span_type(spans[5]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[5]).step (),2); + BOOST_CHECK_EQUAL 
(span_type(spans[5]).last (),4); + BOOST_CHECK_EQUAL (span_type(spans[5]).size (),3); + + BOOST_CHECK_EQUAL (span_type(spans[6]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[6]).step (),2); + BOOST_CHECK_EQUAL (span_type(spans[6]).last (),3); + BOOST_CHECK_EQUAL (span_type(spans[6]).size (),2); + + BOOST_CHECK_EQUAL (span_type(spans[7]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[7]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[7]).last (),4); + BOOST_CHECK_EQUAL (span_type(spans[7]).size (),2); + + BOOST_CHECK_EQUAL (span_type(spans[8]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[8]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[8]).last (),7); + BOOST_CHECK_EQUAL (span_type(spans[8]).size (),3); + + +} + + +BOOST_FIXTURE_TEST_CASE( assignment_operator_test, fixture ) +{ + auto c0 = spans[1]; + BOOST_CHECK_EQUAL ((c0=spans[0]).first(),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).step (),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).last (),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).size (),0); + + auto c1 = spans[2]; + BOOST_CHECK_EQUAL ((c1=spans[1]).first(),0); + BOOST_CHECK_EQUAL ((c1=spans[1]).step (),0); + BOOST_CHECK_EQUAL ((c1=spans[1]).last (),0); + BOOST_CHECK_EQUAL ((c1=spans[1]).size (),1); + + auto c2 = spans[3]; + BOOST_CHECK_EQUAL ((c2=spans[2]).first(),0); + BOOST_CHECK_EQUAL ((c2=spans[2]).step (),1); + BOOST_CHECK_EQUAL ((c2=spans[2]).last (),0); + BOOST_CHECK_EQUAL ((c2=spans[2]).size (),1); + + auto c3 = spans[4]; + BOOST_CHECK_EQUAL ((c3=spans[3]).first(),0); + BOOST_CHECK_EQUAL ((c3=spans[3]).step (),1); + BOOST_CHECK_EQUAL ((c3=spans[3]).last (),2); + BOOST_CHECK_EQUAL ((c3=spans[3]).size (),3); + + auto c4 = spans[5]; + BOOST_CHECK_EQUAL ((c4=spans[4]).first(),1); + BOOST_CHECK_EQUAL ((c4=spans[4]).step (),1); + BOOST_CHECK_EQUAL ((c4=spans[4]).last (),2); + BOOST_CHECK_EQUAL ((c4=spans[4]).size (),2); + + auto c5 = spans[6]; + BOOST_CHECK_EQUAL ((c5=spans[5]).first(),0); + BOOST_CHECK_EQUAL ((c5=spans[5]).step (),2); + BOOST_CHECK_EQUAL ((c5=spans[5]).last (),4); + BOOST_CHECK_EQUAL ((c5=spans[5]).size (),3); + + auto c6 = spans[7]; + BOOST_CHECK_EQUAL ((c6=spans[6]).first(),1); + BOOST_CHECK_EQUAL ((c6=spans[6]).step (),2); + BOOST_CHECK_EQUAL ((c6=spans[6]).last (),3); + BOOST_CHECK_EQUAL ((c6=spans[6]).size (),2); + + auto c7 = spans[8]; + BOOST_CHECK_EQUAL ((c7=spans[7]).first(),1); + BOOST_CHECK_EQUAL ((c7=spans[7]).step (),3); + BOOST_CHECK_EQUAL ((c7=spans[7]).last (),4); + BOOST_CHECK_EQUAL ((c7=spans[7]).size (),2); + +} + +BOOST_FIXTURE_TEST_CASE( access_operator_test, fixture ) +{ + + BOOST_CHECK_EQUAL(spans[0][0], 0); + + BOOST_CHECK_EQUAL(spans[1][0], 0); + + BOOST_CHECK_EQUAL(spans[2][0], 0); + + BOOST_CHECK_EQUAL(spans[3][0], 0); + BOOST_CHECK_EQUAL(spans[3][1], 1); + BOOST_CHECK_EQUAL(spans[3][2], 2); + + BOOST_CHECK_EQUAL(spans[4][0], 1); + BOOST_CHECK_EQUAL(spans[4][1], 2); + + BOOST_CHECK_EQUAL(spans[5][0], 0); + BOOST_CHECK_EQUAL(spans[5][1], 2); + BOOST_CHECK_EQUAL(spans[5][2], 4); + + BOOST_CHECK_EQUAL(spans[6][0], 1); + BOOST_CHECK_EQUAL(spans[6][1], 3); + + BOOST_CHECK_EQUAL(spans[7][0], 1); + BOOST_CHECK_EQUAL(spans[7][1], 4); + + BOOST_CHECK_EQUAL(spans[8][0], 1); + BOOST_CHECK_EQUAL(spans[8][1], 4); + BOOST_CHECK_EQUAL(spans[8][2], 7); + +} + +BOOST_FIXTURE_TEST_CASE( ran_test, fixture ) +{ + using namespace boost::numeric::ublas; + + BOOST_CHECK ( ( ran(0,0,0) == spans[0]) ); + + BOOST_CHECK ( ( ran(0,1,0) == spans[2]) ); + BOOST_CHECK ( ( ran(0, 0) == spans[2]) ); + + + BOOST_CHECK ( ( ran(0,1,2) == spans[3]) ); + BOOST_CHECK ( ( 
ran(0, 2) == spans[3]) ); + + BOOST_CHECK ( ( ran(1,1,2) == spans[4]) ); + BOOST_CHECK ( ( ran(1, 2) == spans[4]) ); + + BOOST_CHECK ( ( ran(0,2,4) == spans[5]) ); + BOOST_CHECK ( ( ran(1,2,4) == spans[6]) ); + BOOST_CHECK ( ( ran(1,3,5) == spans[7]) ); + BOOST_CHECK ( ( ran(1,3,7) == spans[8]) ); +} + +BOOST_AUTO_TEST_SUITE_END(); diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp new file mode 100644 index 000000000..141496fa1 --- /dev/null +++ b/test/tensor/test_subtensor.cpp @@ -0,0 +1,575 @@ +// Copyright (c) 2018 Cem Bassoy +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. +// + + +#include +#include + +#include "utility.hpp" +#include +#include +#include +#include + + + +BOOST_AUTO_TEST_SUITE ( subtensor_testsuite/*, + *boost::unit_test::depends_on("tensor_testsuite") + *boost::unit_test::depends_on("span_testsuite") + *boost::unit_test::depends_on("subtensor_utility_testsuite")*/) ; + +// double,std::complex + + + +using test_types = zip::with_t; + + + +struct fixture_shape +{ + using shape = boost::numeric::ublas::basic_extents; + + fixture_shape() : extents{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,1}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{4,1,3}, // 6 + shape{1,2,3}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,5} // 9 + } + {} + std::vector extents; +}; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixture_shape ) +{ + + namespace ub = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::dynamic_tensor; + using subtensor_type = ub::subtensor; + + + auto check = [](auto const& e) { + auto t = tensor_type{e}; + auto s = subtensor_type(t); + BOOST_CHECK_EQUAL ( s.size() , t.size() ); + BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); + if(e.empty()) { + BOOST_CHECK_EQUAL ( s.empty(), t.empty() ); + BOOST_CHECK_EQUAL ( s. data(), t. data() ); + } + else{ + BOOST_CHECK_EQUAL ( !s.empty(), !t.empty() ); + BOOST_CHECK_EQUAL ( s. data(), t. 
data() ); + } + }; + + for(auto const& e : extents) + check(e); + +} + + + +BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) +{ + + namespace ub = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::dynamic_tensor; + using subtensor_type = ub::subtensor; + using span = ub::sliced_span; + + + { + auto A = tensor_type{}; + auto Asub = subtensor_type( A ); + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + + + + { + auto A = tensor_type{1,1}; + auto Asub = subtensor_type( A, 0, 0 ); + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + + + { + auto A = tensor_type{1,2}; + auto Asub = subtensor_type( A, 0, span{} ); + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + { + auto A = tensor_type{1,2}; + auto Asub = subtensor_type( A, 0, 1 ); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } + + + { + auto A = tensor_type{2,3}; + auto Asub = subtensor_type( A, 0, 1 ); + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } + + { + auto A = tensor_type{4,3}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::end) ); + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } + + { + auto A = tensor_type{4,3,5}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::end), span(2,4) ); + + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( 
Asub.extents().at(2) , 3 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(2), B.strides().at(2) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1)+ + Asub.spans().at(2).first()*A.strides().at(2)); + } + +} + +#if 0 + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_copy_ctor_test, value, test_types, fixture_shape ) +{ + namespace ub = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::dynamic_tensor; + using subtensor_type = ub::subtensor; + using span = ub::sliced_span; + + auto check = [](auto const& e) + { + auto r = tensor_type{e}; + auto t = r; + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + BOOST_CHECK ( t.strides() == r.strides() ); + BOOST_CHECK ( t.extents() == r.extents() ); + + if(e.empty()) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r[i] ); + }; + + for(auto const& e : extents) + check(e); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture_shape ) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + using other_layout_type = std::conditional_t::value, ublas::tag::last_order, ublas::tag::first_order>; + using other_tensor_type = ublas::tensor; + + + for(auto const& e : extents) + { + auto r = tensor_type{e}; + other_tensor_type t = r; + tensor_type q = t; + + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + BOOST_CHECK ( t.extents() == r.extents() ); + + BOOST_CHECK_EQUAL ( q.size() , r.size() ); + BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); + BOOST_CHECK ( q.strides() == r.strides() ); + BOOST_CHECK ( q.extents() == r.extents() ); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( q[i], r[i] ); + } +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + auto check = [](auto const& e) + { + auto r = tensor_type{e}; + auto t = std::move(r); + BOOST_CHECK_EQUAL ( t.size() , e.product() ); + BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + + if(e.empty()) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + }; + + for(auto const& e : extents) + check(e); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + std::random_device device{}; + std::minstd_rand0 generator(device()); + + using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; + auto distribution = distribution_type(1,6); + + for(auto const& e : extents){ + auto r = 
static_cast(distribution(generator)); + auto t = tensor_type{e,r}; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r ); + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + using array_type = typename tensor_type::array_type; + + for(auto const& e : extents) { + auto a = array_type(e.product()); + auto v = value_type {}; + + for(auto& aa : a){ + aa = v; + v += value_type{1}; + } + auto t = tensor_type{e, a}; + v = value_type{}; + + for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) + BOOST_CHECK_EQUAL( t[i], v); + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + for(auto const& e : extents) { + auto t = tensor_type{e}; + auto v = value_type {}; + for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}){ + t[i] = v; + BOOST_CHECK_EQUAL( t[i], v ); + + t(i) = v; + BOOST_CHECK_EQUAL( t(i), v ); + } + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + auto check1 = [](const tensor_type& t) + { + auto v = value_type{}; + for(auto k = 0ul; k < t.size(); ++k){ + BOOST_CHECK_EQUAL(t[k], v); + v+=value_type{1}; + } + }; + + auto check2 = [](const tensor_type& t) + { + std::array k; + auto r = std::is_same_v ? 1 : 0; + auto q = std::is_same_v ? 1 : 0; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); + v+=value_type{1}; + } + } + }; + + auto check3 = [](const tensor_type& t) + { + std::array k; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 2 : 0; + auto o = op_type{}; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); + v+=value_type{1}; + } + } + } + }; + + auto check4 = [](const tensor_type& t) + { + std::array k; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 
3 : 0; + auto o = op_type{}; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); + v+=value_type{1}; + } + } + } + } + }; + + auto check = [check1,check2,check3,check4](auto const& e) { + auto t = tensor_type{e}; + auto v = value_type {}; + for(auto i = 0ul; i < t.size(); ++i){ + t[i] = v; + v+=value_type{1}; + } + + if(t.rank() == 1) check1(t); + else if(t.rank() == 2) check2(t); + else if(t.rank() == 3) check3(t); + else if(t.rank() == 4) check4(t); + + }; + + for(auto const& e : extents) + check(e); +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + + for(auto const& efrom : extents){ + for(auto const& eto : extents){ + + auto v = value_type {}; + v+=value_type{1}; + auto t = tensor_type{efrom, v}; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], v ); + + t.reshape(eto); + for(auto i = 0ul; i < std::min(efrom.product(),eto.product()); ++i) + BOOST_CHECK_EQUAL( t[i], v ); + + BOOST_CHECK_EQUAL ( t.size() , eto.product() ); + BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); + BOOST_CHECK ( t.extents() == eto ); + + if(efrom != eto){ + for(auto i = efrom.product(); i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], value_type{} ); + } + } + } +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + for(auto const& e_t : extents){ + for(auto const& e_r : extents) { + + auto v = value_type {} + value_type{1}; + auto w = value_type {} + value_type{2}; + auto t = tensor_type{e_t, v}; + auto r = tensor_type{e_r, w}; + + std::swap( r, t ); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], w ); + + BOOST_CHECK_EQUAL ( t.size() , e_r.product() ); + BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); + BOOST_CHECK ( t.extents() == e_r ); + + for(auto i = 0ul; i < r.size(); ++i) + BOOST_CHECK_EQUAL( r[i], v ); + + BOOST_CHECK_EQUAL ( r.size() , e_t.product() ); + BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); + BOOST_CHECK ( r.extents() == e_t ); + + + } + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + for(auto const& e : extents) + { + auto v = value_type {} + value_type{1}; + auto t = tensor_type{e, v}; + + BOOST_CHECK_EQUAL( std::distance(t.begin(), t.end ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); + + BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); + + if(t.size() > 0) { + BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; + BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; + } + } +} + +#endif + +BOOST_AUTO_TEST_SUITE_END(); diff --git a/test/tensor/test_subtensor_utility.cpp 
b/test/tensor/test_subtensor_utility.cpp new file mode 100644 index 000000000..e8c5aed19 --- /dev/null +++ b/test/tensor/test_subtensor_utility.cpp @@ -0,0 +1,394 @@ +// Copyright (c) 2018 Cem Bassoy +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. +// + + +#include +#include + +#include "utility.hpp" +#include +#include +#include +#include +#include + + + +BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) ; + + + +struct fixture_sliced_span { + using span_type = boost::numeric::ublas::sliced_span; + + fixture_sliced_span() + : spans{ + span_type(), // 0, a(:) + span_type(0,0), // 1, a(0:0) + span_type(0,2), // 2, a(0:2) + span_type(1,1), // 3, a(1:1) + span_type(1,3), // 4, a(1:3) + span_type(1,boost::numeric::ublas::end), // 5, a(1:end) + span_type(boost::numeric::ublas::end) // 6, a(end) + } + {} + std::vector spans; +}; + + +BOOST_FIXTURE_TEST_CASE( transform_sliced_span_test, fixture_sliced_span ) +{ + + using namespace boost::numeric; + +// template + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::sliced_span(0,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::sliced_span(0,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::sliced_span(0,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::sliced_span(0,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::sliced_span(0,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::sliced_span(0,0) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::sliced_span(0,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::sliced_span(0,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::sliced_span(0,2) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::sliced_span(1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::sliced_span(1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::sliced_span(1,1) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::sliced_span(1,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::sliced_span(1,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::sliced_span(1,5) ); + + + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::sliced_span(3,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::sliced_span(4,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::sliced_span(5,5) ); +} + + +struct fixture_strided_span { + using span_type = 
boost::numeric::ublas::strided_span; + + fixture_strided_span() + : spans{ + span_type(), // 0, a(:) + span_type(0,1,0), // 1, a(0:1:0) + span_type(0,2,2), // 2, a(0:2:2) + span_type(1,1,1), // 3, a(1:1:1) + span_type(1,1,3), // 4, a(1:1:3) + span_type(1,2,boost::numeric::ublas::end), // 5, a(1:2:end) + span_type(boost::numeric::ublas::end) // 6, a(end) + } + {} + std::vector spans; +}; + + +BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) +{ + + using namespace boost::numeric; + +// template + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::strided_span(0,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::strided_span(0,1,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::strided_span(0,1,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::strided_span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::strided_span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::strided_span(0,1,0) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::strided_span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::strided_span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::strided_span(0,2,2) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::strided_span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::strided_span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::strided_span(1,1,1) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::strided_span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::strided_span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::strided_span(1,1,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::strided_span(1,2,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::strided_span(1,2,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::strided_span(1,2,5) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::strided_span(3,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::strided_span(4,1,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::strided_span(5,1,5) ); +} + + + + + + +struct fixture_shape { + using shape = boost::numeric::ublas::basic_extents; + + fixture_shape() : extents{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,1}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{4,1,3}, // 6 + shape{1,2,3}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,5} // 9 + } + {} + std::vector extents; +}; + +BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) +{ + using namespace boost::numeric::ublas; + using span = sliced_span; + + // shape{} + { + auto v = detail::generate_span_array(extents[0]); + auto r = std::vector{}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + + // shape{1,1} + { + auto v 
= detail::generate_span_array(extents[1],span(),span()); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,1} + { + auto v = detail::generate_span_array(extents[1],end,span(end)); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,1} + { + auto v = detail::generate_span_array(extents[1],0,end); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,2} + { + auto v = detail::generate_span_array(extents[2],0,end); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,2} + { + auto v = detail::generate_span_array(extents[2],0,1); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = detail::generate_span_array(extents[2],span(),span()); + auto r = std::vector{span(0,0),span(0,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{2,3} + { + auto v = detail::generate_span_array(extents[4],span(),span()); + auto r = std::vector{span(0,1),span(0,2)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = detail::generate_span_array(extents[4],1,span(1,end)); + auto r = std::vector{span(1,1),span(1,2)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{2,3,1} + { + auto v = detail::generate_span_array(extents[5],span(),span(),0); + auto r = std::vector{span(0,1),span(0,2),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = detail::generate_span_array(extents[5],1,span(),end); + auto r = std::vector{span(1,1),span(0,2),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } +} + + + +struct fixture_span_vector_shape { + using shape = boost::numeric::ublas::basic_extents; + using span = boost::numeric::ublas::sliced_span; + + + fixture_span_vector_shape() + : extents_{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,3}, // 3 + shape{4,2,3}, // 4 + shape{4,2,3,5} // 5 + } + , span_vectors_{ + /*A(:)*/ boost::numeric::ublas::detail::generate_span_array(extents_[0]), + /*A(0,0)*/ boost::numeric::ublas::detail::generate_span_array(extents_[1],0,0), + /*A(0,:)*/ boost::numeric::ublas::detail::generate_span_array(extents_[2],0,span()), + /*A(1,1:2)*/ boost::numeric::ublas::detail::generate_span_array(extents_[3],1,span(1,2)), + /*A(1:3,1,1:2)*/ boost::numeric::ublas::detail::generate_span_array(extents_[4],span(1,3),1,span(0,1)), + /*A(1:3,1,0:1,2:4)*/ boost::numeric::ublas::detail::generate_span_array(extents_[5],span(1,3),1,span(0,1),span(2,4)), + } + , reference_ { + shape{}, + shape{1,1}, + shape{1,2}, + shape{1,2}, + shape{3,1,2}, + shape{3,1,2,3} + } + { + assert(extents_.size() == reference_.size()); + assert(extents_.size() == std::tuple_size_v); + } + 
std::array extents_; + std::tuple< + std::array, + std::array, + std::array, + std::array, + std::array, + std::array + > span_vectors_; + + std::array reference_; +}; + + + +BOOST_FIXTURE_TEST_CASE( extents_test, fixture_span_vector_shape ) +{ + using namespace boost::numeric; + + BOOST_CHECK ( std::equal( std::get<0>(reference_).begin(), std::get<0>(reference_).end(), ublas::detail::compute_extents( std::get<0>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<1>(reference_).begin(), std::get<1>(reference_).end(), ublas::detail::compute_extents( std::get<1>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<2>(reference_).begin(), std::get<2>(reference_).end(), ublas::detail::compute_extents( std::get<2>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<3>(reference_).begin(), std::get<3>(reference_).end(), ublas::detail::compute_extents( std::get<3>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<4>(reference_).begin(), std::get<4>(reference_).end(), ublas::detail::compute_extents( std::get<4>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<5>(reference_).begin(), std::get<5>(reference_).end(), ublas::detail::compute_extents( std::get<5>(span_vectors_) ).begin() ) ); + +} + + +using test_types = std::tuple; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_vector_shape ) +{ + using namespace boost::numeric; + using strides = ublas::basic_strides; + + { + auto s = std::get<0>(span_vectors_); + auto w = strides( std::get<0>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, 0 ); + } + + { + auto s = std::get<1>(span_vectors_); + auto w = strides( std::get<1>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, 0 ); + } + + { + auto s = std::get<2>(span_vectors_); + auto w = strides( std::get<2>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, 0 ); + } + + { + auto s = std::get<3>(span_vectors_); + auto w = strides( std::get<3>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] ); + } + + { + auto s = std::get<4>(span_vectors_); + auto w = strides( std::get<4>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] ); + } + + + { + auto s = std::get<5>(span_vectors_); + auto w = ( std::get<5>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] + s[3].first()*w[3] ); + } + +} + + +#if 0 + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( span_strides_test, layout, test_types, fixture_span_vector_shape ) +{ + + /*A(:)*/ + /*A(0,0)*/ + /*A(0,:)*/ + /*A(1,1:2)*/ + /*A(1:3,1,1:2)*/ + /*A(1:3,1,0:1,2:4)*/ + + + // auto span_strides(strides_type const& strides, std::vector const& spans) + + using namespace boost::numeric; + using strides = ublas::basic_strides; + + for(unsigned k = 0; k < span_vectors_.size(); ++k) + { + auto s = span_vectors_[k]; + auto w = strides( extents_[k] ); + auto ss = ublas::detail::span_strides( w, s ); + for(unsigned i = 0; i < w.size(); ++i) + BOOST_CHECK_EQUAL( ss[i], w[i]*s[i].step() ); + } + +} + +#endif + +BOOST_AUTO_TEST_SUITE_END(); From 94ae6a7f997ad93b47c172c7eea67dc4f6708575 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Thu, 4 Jun 2020 19:58:32 +0200 Subject: [PATCH 2/9] add subtensor test --- 
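A note on the invariant these subtensor tests keep asserting: a subtensor is a
non-owning view, so its data() must equal the tensor's data() advanced by the
first index of each span times the corresponding tensor stride. Below is a
minimal, standard-library-only sketch of that offset rule for the 4x3 case with
span(1,2) and span(1,end) from subtensor_ctor2_test; first_order is column-major
here, so the strides of extents {4,3} are {1,4}. All names in the sketch are
illustrative only, nothing in it is the uBLAS API itself.

    #include <array>
    #include <cassert>
    #include <numeric>

    int main()
    {
        // extents {4,3} with first-order (column-major) strides w = {1,4}
        constexpr std::array<std::size_t, 2> w{1, 4};
        // spans A(1:2, 1:end): the first index of each span is 1
        constexpr std::array<std::size_t, 2> first{1, 1};

        // offset the test checks: Asub.data() == A.data() + sum_r first[r]*w[r]
        auto const offset = std::inner_product(first.begin(), first.end(),
                                               w.begin(), std::size_t{0});
        assert(offset == 1 * w[0] + 1 * w[1]); // == 5
        return 0;
    }
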
.../boost/numeric/ublas/tensor/subtensor.hpp | 126 ++++++------------ test/tensor/test_subtensor.cpp | 58 +++++--- 2 files changed, 79 insertions(+), 105 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp index d03130610..5b07e9c5f 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -156,84 +156,51 @@ class subtensor > /** @brief Constructs a tensor view from a tensor without any range. * - * @note can be regarded as a pointer to a tensor + * @note is similar to a handle to a tensor */ explicit subtensor (tensor_type const& t) - : super_type () - , spans_() - , extents_ (t.extents()) - , strides_ (t.strides()) - , span_strides_(t.strides()) - , data_ (t.data()) + : super_type () + , spans_ () + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_ (t.strides()) + , data_ (t.data()) { } -#if 0 - /** @brief Constructs a tensor with a \c shape and initiates it with one-dimensional data - * - * @code tensor A{extents{4,2,3}, array }; @endcode - * - * - * @param s initial tensor dimension extents - * @param a container of \c array_type that is copied according to the storage layout - */ - BOOST_UBLAS_INLINE - tensor (extents_type const& s, const array_type &a) - : tensor_expression_type() //tensor_container() - , extents_ (s) - , strides_ (extents_) - , data_ (a) - { - if(this->extents_.product() != this->data_.size()) - throw std::runtime_error("Error in boost::numeric::ublas::tensor: size of provided data and specified extents do not match."); - } - - - - /** @brief Constructs a tensor using a shape tuple and initiates it with a value. - * - * @code tensor A{extents{4,2,3}, 1 }; @endcode - * - * @param e initial tensor dimension extents - * @param i initial value of all elements of type \c value_type - */ - BOOST_UBLAS_INLINE - tensor (extents_type const& e, const value_type &i) - : tensor_expression_type() //tensor_container () - , extents_ (e) - , strides_ (extents_) - , data_ (extents_.product(), i) - {} - /** @brief Constructs a tensor from another tensor * * @param v tensor to be copied. */ - BOOST_UBLAS_INLINE - tensor (const tensor &v) - : tensor_expression_type() - , extents_ (v.extents_) - , strides_ (v.strides_) - , data_ (v.data_ ) + inline + subtensor (const subtensor &v) + : super_type () + , spans_ (v.spans_) + , extents_ (v.extents_) + , strides_ (v.strides_) + , span_strides_ (v.span_strides_) + , data_ (v.data_) {} - /** @brief Constructs a tensor from another tensor * * @param v tensor to be moved. 
*/ BOOST_UBLAS_INLINE - tensor (tensor &&v) - : tensor_expression_type() //tensor_container () - , extents_ (std::move(v.extents_)) - , strides_ (std::move(v.strides_)) - , data_ (std::move(v.data_ )) + subtensor (subtensor &&v) + : super_type () + , spans_ (std::move(v.spans_)) + , extents_ (std::move(v.extents_)) + , strides_ (std::move(v.strides_)) + , span_strides_ (std::move(v.span_strides_)) + , data_ (std::move(v.data_)) {} +#if 0 /** @brief Constructs a tensor with a matrix * @@ -415,86 +382,74 @@ class subtensor > /** @brief Returns true if the subtensor is empty (\c size==0) */ - BOOST_UBLAS_INLINE - bool empty () const { - return this->size() == size_type(0); + inline bool empty () const { + return this->size() == 0ul; } /** @brief Returns the size of the subtensor */ - BOOST_UBLAS_INLINE - size_type size () const { + inline size_type size () const { return product(this->extents_); } /** @brief Returns the size of the subtensor */ - BOOST_UBLAS_INLINE - size_type size (size_type r) const { + inline size_type size (size_type r) const { return this->extents_.at(r); } /** @brief Returns the number of dimensions/modes of the subtensor */ - BOOST_UBLAS_INLINE - size_type rank () const { + inline size_type rank () const { return this->extents_.size(); } /** @brief Returns the number of dimensions/modes of the subtensor */ - BOOST_UBLAS_INLINE - size_type order () const { + inline size_type order () const { return this->extents_.size(); } /** @brief Returns the strides of the subtensor */ - BOOST_UBLAS_INLINE - auto const& strides () const { + inline auto const& strides () const { return this->strides_; } /** @brief Returns the span strides of the subtensor */ - BOOST_UBLAS_INLINE - auto const& span_strides () const { + inline auto const& span_strides () const { return this->span_strides_; } /** @brief Returns the span strides of the subtensor */ - BOOST_UBLAS_INLINE - auto const& spans () const { + inline auto const& spans () const { return this->spans_; } /** @brief Returns the extents of the subtensor */ - BOOST_UBLAS_INLINE - auto const& extents () const { + inline auto const& extents () const { return this->extents_; } /** @brief Returns a \c const reference to the container. */ - BOOST_UBLAS_INLINE - const_pointer data () const { + inline const_pointer data () const { return this->data_; } /** @brief Returns a \c const reference to the container. */ - BOOST_UBLAS_INLINE - pointer data () { + inline pointer data () { return this->data_; } -#if 0 + /** @brief Element access using a single index. * * @code auto a = A[i]; @endcode * * @param i zero-based index where 0 <= i < this->size() */ - BOOST_UBLAS_INLINE - const_reference operator [] (size_type i) const { + inline const_reference operator [] (size_type i) const { return this->data_[i]; } @@ -505,13 +460,12 @@ class subtensor > * * @param i zero-based index where 0 <= i < this->size() */ - BOOST_UBLAS_INLINE - reference operator [] (size_type i) + inline reference operator [] (size_type i) { - return this->data_[i]; + return this->data_[i]; } - +#if 0 /** @brief Element access using a multi-index or single-index. 
* * diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index 141496fa1..24788ce6b 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -21,16 +21,13 @@ -BOOST_AUTO_TEST_SUITE ( subtensor_testsuite/*, - *boost::unit_test::depends_on("tensor_testsuite") - *boost::unit_test::depends_on("span_testsuite") - *boost::unit_test::depends_on("subtensor_utility_testsuite")*/) ; +BOOST_AUTO_TEST_SUITE ( subtensor_testsuite ) ; // double,std::complex -using test_types = zip::with_t; +using test_types = zip>::with_t; @@ -213,9 +210,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) } -#if 0 -BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_copy_ctor_test, value, test_types, fixture_shape ) + +BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, fixture_shape ) { namespace ub = boost::numeric::ublas; using value_type = typename value::first_type; @@ -224,32 +221,55 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_copy_ctor_test, value, test_types, using subtensor_type = ub::subtensor; using span = ub::sliced_span; + + auto check = [](auto const& e) { - auto r = tensor_type{e}; - auto t = r; - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.strides() == r.strides() ); - BOOST_CHECK ( t.extents() == r.extents() ); + + auto A = tensor_type{e}; + value_type i{}; + for(auto & a : A) + a = i+=value_type{1}; + + auto Asub = subtensor_type( A ); + auto Bsub = subtensor_type( A ); + + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + + BOOST_CHECK( Bsub.span_strides() == A.strides() ); + BOOST_CHECK( Bsub.strides() == A.strides() ); + BOOST_CHECK( Bsub.extents() == A.extents() ); + BOOST_CHECK( Bsub.data() == A.data() ); + + BOOST_CHECK_EQUAL ( Bsub.size() , A.size() ); + BOOST_CHECK_EQUAL ( Bsub.rank() , A.rank() ); + + if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); + BOOST_CHECK ( Bsub.empty() ); + BOOST_CHECK_EQUAL ( Bsub.data() , nullptr); } else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); + BOOST_CHECK ( !Bsub.empty() ); + BOOST_CHECK_NE ( Bsub.data() , nullptr); } - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r[i] ); + for(auto i = 0ul; i < Asub.size(); ++i) + BOOST_CHECK_EQUAL( Asub[i], Bsub[i] ); + }; for(auto const& e : extents) check(e); + } +#if 0 BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture_shape ) { From 1513a07ae114e7cfd065314b29076b8ff6b24196 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Fri, 19 Jun 2020 08:25:23 +0200 Subject: [PATCH 3/9] add access functions for tensor and subtensor. 
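
compute_single_index folds a multi-index i and strides w into the flat index
j = sum_r i[r]*w[r] (an inner product); compute_multi_index inverts it by greedy
division (for first-order strides that means dividing by the last, i.e. largest,
stride first). A self-contained sketch of that round trip with the shape {3,2,4}
and its first-order strides {1,3,6} from the test_access fixture; standard
library only, nothing below is the new access API itself.

    #include <array>
    #include <cassert>
    #include <numeric>

    int main()
    {
        // shape {3,2,4}, first-order (column-major) strides {1,3,6}
        constexpr std::array<std::size_t, 3> w{1, 3, 6};
        constexpr std::array<std::size_t, 3> i{1, 1, 2};

        // flat index, as compute_single_index evaluates it via std::inner_product
        auto const j = std::inner_product(i.begin(), i.end(), w.begin(), std::size_t{0});
        assert(j == 16); // matches indexf for shape {3,2,4} and multi-index {1,1,2}

        // back to the multi-index: divide by the largest stride first,
        // mirroring the loop in compute_multi_index
        std::array<std::size_t, 3> k{};
        auto rest = j;
        for (int r = 2; r >= 0; --r) {
            k[r] = rest / w[r];
            rest -= k[r] * w[r];
        }
        assert(k == i);
        return 0;
    }
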
--- IDEs/qtcreator/include/include.pro | 1 + IDEs/qtcreator/test/test_tensor.pro | 4 +- IDEs/qtcreator/ublas_develop.pro | 2 + include/boost/numeric/ublas/tensor/access.hpp | 154 ++++++++ .../ublas/tensor/detail/extents_functions.hpp | 332 ++++++++++++++++++ .../numeric/ublas/tensor/dynamic_strides.hpp | 200 +++++++++++ test/tensor/Jamfile | 141 ++++---- test/tensor/test_access.cpp | 305 ++++++++++++++++ test/tensor/test_subtensor_utility.cpp | 8 +- 9 files changed, 1078 insertions(+), 69 deletions(-) create mode 100644 include/boost/numeric/ublas/tensor/access.hpp create mode 100644 include/boost/numeric/ublas/tensor/detail/extents_functions.hpp create mode 100644 include/boost/numeric/ublas/tensor/dynamic_strides.hpp create mode 100644 test/tensor/test_access.cpp diff --git a/IDEs/qtcreator/include/include.pro b/IDEs/qtcreator/include/include.pro index a5aeead8b..3d28ef8f6 100644 --- a/IDEs/qtcreator/include/include.pro +++ b/IDEs/qtcreator/include/include.pro @@ -4,6 +4,7 @@ TARGET = ublas CONFIG += staticlib depend_includepath CONFIG -= qt CONFIG += c++20 + INCLUDE_DIR=../../../include include(detail/detail.pri) diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 8de6aba84..1c6c216c3 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -5,6 +5,8 @@ CONFIG += staticlib depend_includepath console CONFIG -= qt CONFIG += c++20 +CONFIG += c++17 + #QMAKE_CXXFLAGS += -fno-inline QMAKE_CXXFLAGS =-std=c++20 QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra @@ -38,6 +40,6 @@ HEADERS += $${TEST_DIR}/utility.hpp SOURCES += $${TEST_DIR}/algorithm/test_*.cpp SOURCES += $${TEST_DIR}/extents/test_*.cpp SOURCES += $${TEST_DIR}/functions/test_*.cpp -SOURCES += $${TEST_DIR}/multiplicatoin/test_*.cpp +SOURCES += $${TEST_DIR}/multiplication/test_*.cpp SOURCES += $${TEST_DIR}/tensor/test_*.cpp SOURCES += $${TEST_DIR}/test_*.cpp diff --git a/IDEs/qtcreator/ublas_develop.pro b/IDEs/qtcreator/ublas_develop.pro index 49fc2d99c..be5315356 100644 --- a/IDEs/qtcreator/ublas_develop.pro +++ b/IDEs/qtcreator/ublas_develop.pro @@ -3,6 +3,8 @@ CONFIG += ordered SUBDIRS = include # examples # benchmarks OTHER_FILES += ../../changelog.txt +CONFIG += c++17 +QMAKE_CXXFLAGS += -std=c++17 #include (tests.pri) diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp new file mode 100644 index 000000000..e83e588c9 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -0,0 +1,154 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef _BOOST_UBLAS_TENSOR_ACCESS_HPP +#define _BOOST_UBLAS_TENSOR_ACCESS_HPP + + +#include +#include +#include + +namespace boost::numeric::ublas::detail{ + + +/** \brief Computes a single index from multi-index of a tensor or subtensor + * + * \param i iterator to a multi-index vector of length std::distance(begin,end) + * \param ip iterator to a multi-index vector of length std::distance(begin,end) + * \param w iterator to a stride vector of length std::distance(begin,end) or greater +*/ +template +constexpr inline auto compute_single_index(InputIt1 i, InputIt1 ip, InputIt2 w) +{ + return std::inner_product(i,ip,w,0ul,std::plus<>{},std::multiplies<>{}); +} + + +/** \brief Computes a single index from a multi-index of a tensor or subtensor + * + * \param i iterator to a multi-index vector of length std::distance(begin,end) + * \param ip iterator to a multi-index vector of length std::distance(begin,end) + * \param w iterator to a stride vector of length std::distance(begin,end) or greater +*/ +template +constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 w) +{ + if constexpr(p==0u) return 0ul; + else if constexpr(p >1u) return compute_single_index(i,i,w)+i[p-1]*w[p-1]; + else return i[p-1]*w[p-1]; +} + +/** @brief Computes a multi-index from single index of a tensor or subtensor + * + * j = compute_single_index (i, ip, w) + * compute_multi_index (j, w, wp, k) with k == i + * + * @param w begin input iterator to a container with tensor or subtensor strides of length std::distance(begin,end) + * @param wp end input iterator to a container with tensor or subtensor strides of length std::distance(begin,end) + * @param i begin output iterator to a container with tensor or subtensor indices length std::distance(begin,end) or greater +*/ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +{ + if(w == wp) + return; + auto p = std::distance(w,wp); + auto kq = j; + //auto q = 0ul; + + + + for(int r = p-1; r >= 0; --r) + { + //q = l[r]-1; + i[r] = kq/w[r]; + kq -= w[r]*i[r]; + } + + //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} + + +template +constexpr inline void compute_multi_index_first(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +{ + if(w == wp) + return; + auto p = std::distance(w,wp); + auto kq = j; + //auto q = 0ul; + + + +// for(int r = p-1; r >= 0; --r) +// { +// //q = l[r]-1; +// i[r] = kq/w[r]; +// kq -= w[r]*i[r]; +// } + + std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} + +template +constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +{ + if(w == wp) + return; + auto p = std::distance(w,wp); + auto kq = j; + + for(unsigned r = 0ul; r < p; ++r) { + i[r] = kq/w[r]; + kq -= w[r]*i[r]; + } + + //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} + + + +/** @brief Computes a single index from a multi-index of a dense tensor or subtensor + * + * @param j single index that is transformed into a multi-index + * @param w begin input iterator to a container with strides of length p + * @param i begin input iterator to a container with indices of length p or greater +*/ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i) 
+{ + if constexpr (p==0u) return; + else if constexpr (p >1u) {i[p-1]=j/w[p-1]; compute_multi_index(j-w[p-1]*i[p-1],w,w,i); } + else {i[p-1]=j/w[p-1]; } +} + + +/** @brief Computes a single (relative memory) index of a dense tensor from a single index of one of its subtensor + * + * @param jv single index of a subtensor that is transformed into a single index of a dense tensor + * @param w begin input iterator of a container with tensor strides of length std::distance(w,wp) + * @param wp end input iterator of a container with tensor strides of length std::distance(w,wp) + * @param v begin input iterator of a container with subtensor strides of length std::distance(w,wp) or greater +*/ +template +constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 wp, InputIt2 v) +{ + return std::inner_product(w,wp,v,0ul, + std::plus<>{}, + [&jv](auto ww, auto vv) { auto k=jv/vv; jv-=vv*k; return ww*k; } + ); +} + +} // namespace + +#endif diff --git a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp new file mode 100644 index 000000000..4273829d1 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp @@ -0,0 +1,332 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google +// + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ + +#include +#include +#include +#include +#include +#include +#include + +namespace boost::numeric::ublas::detail{ + + template + constexpr auto push_back(basic_static_extents) -> basic_static_extents; + + template + constexpr auto push_front(basic_static_extents) -> basic_static_extents; + + template + constexpr auto squeeze_impl_remove_one( basic_static_extents, basic_static_extents num = basic_static_extents{} ){ + // executed when basic_static_extents is size of 1 + // @code basic_static_extents @endcode + if constexpr( sizeof...(E) == 0ul ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + if constexpr( E0 == T(1) ){ + return num; + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + return decltype(push_back(num)){}; + } + }else{ + if constexpr( E0 == T(1) ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + return squeeze_impl_remove_one(basic_static_extents{}, num); + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + auto n_num_list = decltype(push_back(num)){}; + return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); + } + } + } + + template + constexpr auto squeeze_impl( basic_static_extents const& e ){ + + using extents_type = basic_static_extents; + + if constexpr( extents_type::_size <= typename extents_type::size_type(2) ){ + return e; + } + + using value_type = typename extents_type::value_type; + using size_type = typename extents_type::size_type; + + auto one_free_static_extents = squeeze_impl_remove_one(e); + + // check after removing 1s from the list are they same + // if same that means 1s does not exist and no need to + // squeeze + if constexpr( decltype(one_free_static_extents)::_size != 
extents_type::_size ){ + + // after squeezing, all the extents are 1s we need to + // return extents of (1, 1) + if constexpr( decltype(one_free_static_extents)::_size == size_type(0) ){ + + return basic_static_extents{}; + + }else if constexpr( decltype(one_free_static_extents)::_size == (1) ){ + // to comply with GNU Octave this check is made + // if position 2 contains 1 we push at back + // else we push at front + if constexpr( extents_type::at(1) == value_type(1) ){ + return decltype( push_back(one_free_static_extents) ){}; + }else{ + return decltype( push_front(one_free_static_extents) ){}; + } + + }else{ + return one_free_static_extents; + } + + }else{ + return e; + } + + } + + template + inline + constexpr auto squeeze_impl( basic_extents const& e ){ + using extents_type = basic_extents; + using base_type = typename extents_type::base_type; + using value_type = typename extents_type::value_type; + using size_type = typename extents_type::size_type; + + if( e.size() <= size_type(2) ){ + return e; + } + + auto not_one = [](auto const& el){ + return el != value_type(1); + }; + + // count non one values + size_type size = std::count_if(e.begin(), e.end(), not_one); + + // reserve space + base_type n_extents( std::max(size, size_type(2)), 1 ); + + // copying non 1s to the new extents + std::copy_if(e.begin(), e.end(), n_extents.begin(), not_one); + + // checking if extents size goes blow 2 + // if size of extents goes to 1 + // complying with GNU Octave + // if position 2 contains 1 we + // swap the pos + if( size < size_type(2) && e[1] != value_type(1) ){ + std::swap(n_extents[0], n_extents[1]); + } + + return extents_type(n_extents); + } + + template + inline + auto squeeze_impl( basic_fixed_rank_extents const& e ){ + if constexpr( N <= 2 ){ + return e; + }else{ + return squeeze_impl(basic_extents(e)); + } + } + + + +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas { + +/** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ +template +[[nodiscard]] inline +constexpr bool valid(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::valid() : invalid type, type should be an extents"); + + auto greater_than_zero = [](auto const& a){ return a > 0u; }; + + if( e.size() == 1u ) return e[0] == 1u; + return !e.empty() && std::all_of(e.begin(), e.end(), greater_than_zero ); +} + +/** + * @code static_extents<4,1,2,3,4> s; + * std::cout< +[[nodiscard]] inline +std::string to_string(T const &e) { + + using value_type = typename T::value_type; + + static_assert(is_extents_v ||is_strides_v, + "boost::numeric::ublas::to_string() : invalid type, type should be an extents or a strides"); + + if ( e.empty() ) return "[]"; + + std::stringstream ss; + + ss << "[ "; + + std::copy( e.begin(), e.end() - 1, std::ostream_iterator(ss,", ") ); + + ss << e.back() << " ]"; + + return ss.str(); +} + +/** @brief Returns true if this has a scalar shape + * + * @returns true if (1,1,[1,...,1]) + */ +template +[[nodiscard]] inline +constexpr bool is_scalar(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_scalar() : invalid type, type should be an extents"); + + auto equal_one = [](auto const &a) { return a == 1u; }; + + return !e.empty() && std::all_of(e.begin(), e.end(), equal_one); +} + +/** @brief Returns true if this has a vector shape + * + * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 + */ +template +[[nodiscard]] inline +constexpr bool is_vector(ExtentsType const &e) { + + 
static_assert(is_extents_v, "boost::numeric::ublas::is_vector() : invalid type, type should be an extents"); + + auto greater_one = [](auto const &a) { return a > 1u; }; + auto equal_one = [](auto const &a) { return a == 1u; }; + + if (e.size() == 0u) return false; + else if (e.size() == 1u) return e[0] > 1u; + else return std::any_of(e.begin(), e.begin() + 2, greater_one) && + std::any_of(e.begin(), e.begin() + 2, equal_one) && + std::all_of(e.begin() + 2, e.end(), equal_one); + +} + +/** @brief Returns true if this has a matrix shape + * + * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + */ +template +[[nodiscard]] inline +constexpr bool is_matrix(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_matrix() : invalid type, type should be an extents"); + + auto greater_one = [](auto const &a) { return a > 1u; }; + auto equal_one = [](auto const &a) { return a == 1u; }; + + return ( e.size() >= 2u ) && std::all_of(e.begin(), e.begin() + 2, greater_one) && + std::all_of(e.begin() + 2, e.end(), equal_one); +} + + +/** @brief Returns true if this is has a tensor shape + * + * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() + */ +template +[[nodiscard]] inline +constexpr bool is_tensor(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_tensor() : invalid type, type should be an extents"); + + auto greater_one = [](auto const &a) { return a > 1u;}; + + return ( e.size() >= 3u ) && std::any_of(e.begin() + 2, e.end(), greater_one); +} + +/** @brief Eliminates singleton dimensions when size > 2 + * + * squeeze { 1,1} -> { 1,1} + * squeeze { 2,1} -> { 2,1} + * squeeze { 1,2} -> { 1,2} + * + * squeeze {1,2,3} -> { 2,3} + * squeeze {2,1,3} -> { 2,3} + * squeeze {1,3,1} -> { 1,3} + * + * @returns basic_extents with squeezed extents + */ +template +[[nodiscard]] inline +auto squeeze(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::squeeze() : invalid type, type should be an extents"); + + return detail::squeeze_impl(e); +} + +/** @brief Returns the product of extents */ +template +[[nodiscard]] inline +constexpr auto product(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::product() : invalid type, type should be an extents"); + + if ( e.empty() ) return 0u; + else return std::accumulate(e.begin(), e.end(), 1u, std::multiplies<>()) ; +} + + +template && is_extents_v + , int> = 0 +> +[[nodiscard]] inline +constexpr bool operator==(LExtents const& lhs, RExtents const& rhs) noexcept{ + + static_assert( std::is_same_v, + "boost::numeric::ublas::operator==(LExtents, RExtents) : LHS value type should be same as RHS value type"); + + return ( lhs.size() == rhs.size() ) && std::equal(lhs.begin(), lhs.end(), rhs.begin()); +} + +template && is_extents_v + , int> = 0 +> +[[nodiscard]] inline +constexpr bool operator!=(LExtents const& lhs, RExtents const& rhs) noexcept{ + + static_assert( std::is_same_v, + "boost::numeric::ublas::operator!=(LExtents, RExtents) : LHS value type should be same as RHS value type"); + + return !( lhs == rhs ); +} + +} // namespace boost::numeric::ublas + +#endif diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp new file mode 100644 index 000000000..cdc1840f4 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp @@ -0,0 +1,200 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, 
amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// +/// \file strides.hpp Definition for the basic_strides template class + + +#ifndef _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ +#define _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ + +#include +#include +#include + +namespace boost { +namespace numeric { +namespace ublas { + +using first_order = column_major; +using last_order = row_major; + +template +class basic_extents; + + +/** @brief Template class for storing tensor strides for iteration with runtime variable size. + * + * Proxy template class of std::vector. + * + */ +template +class basic_strides +{ +public: + + using base_type = std::vector<__int_type>; + + static_assert( std::numeric_limits::is_integer, + "Static error in boost::numeric::ublas::basic_strides: type must be of type integer."); + static_assert(!std::numeric_limits::is_signed, + "Static error in boost::numeric::ublas::basic_strides: type must be of type unsigned integer."); + static_assert(std::is_same<__layout,first_order>::value || std::is_same<__layout,last_order>::value, + "Static error in boost::numeric::ublas::basic_strides: layout type must either first or last order"); + + + using layout_type = __layout; + using value_type = typename base_type::value_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using size_type = typename base_type::size_type; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + + + /** @brief Default constructs basic_strides + * + * @code auto ex = basic_strides{}; + */ + constexpr explicit basic_strides() + : _base{} + { + } + + /** @brief Constructs basic_strides from basic_extents for the first- and last-order storage formats + * + * @code auto strides = basic_strides( basic_extents{2,3,4} ); + * + */ + template + basic_strides(basic_extents const& s) + : _base(s.size(),1) + { + if( s.empty() ) + return; + + if( !valid(s) ) + throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : shape is not valid."); + + if( is_vector(s) || is_scalar(s) ) /* */ + return; + + if( this->size() < 2 ) + throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : size of strides must be greater or equal 2."); + + + if constexpr (std::is_same::value){ + assert(this->size() >= 2u); + size_type k = 1ul, kend = this->size(); + for(; k < kend; ++k) + _base[k] = _base[k-1] * s[k-1]; + } + else { + assert(this->size() >= 2u); + size_type k = this->size()-2, kend = 0ul; + for(; k > kend; --k) + _base[k] = _base[k+1] * s[k+1]; + _base[0] = _base[1] * s[1]; + } + } + + basic_strides(basic_strides const& l) + : _base(l._base) + {} + + basic_strides(basic_strides && l ) + : _base(std::move(l._base)) + {} + + basic_strides(base_type const& l ) + : _base(l) + {} + + basic_strides(base_type && l ) + : _base(std::move(l)) + {} + + ~basic_strides() = default; + + + basic_strides& operator=(basic_strides other) + { + swap (*this, other); + return *this; + } + + friend void swap(basic_strides& lhs, basic_strides& rhs) { + std::swap(lhs._base , rhs._base); + } + + [[nodiscard]] inline + constexpr const_reference operator[] (size_type p) const{ + return _base[p]; + } + + [[nodiscard]] inline + constexpr 
const_pointer data() const{ + return _base.data(); + } + + [[nodiscard]] inline + constexpr const_reference at (size_type p) const{ + return _base.at(p); + } + + [[nodiscard]] inline + constexpr const_reference back () const{ + return _base[_base.size() - 1]; + } + + [[nodiscard]] inline + constexpr reference back (){ + return _base[_base.size() - 1]; + } + + [[nodiscard]] inline + constexpr bool empty() const{ + return _base.empty(); + } + + [[nodiscard]] inline + constexpr size_type size() const{ + return _base.size(); + } + + [[nodiscard]] inline + constexpr const_iterator begin() const{ + return _base.begin(); + } + + [[nodiscard]] inline + constexpr const_iterator end() const{ + return _base.end(); + } + + inline + constexpr void clear() { + this->_base.clear(); + } + + [[nodiscard]] inline + constexpr base_type const& base() const{ + return this->_base; + } + +protected: + base_type _base; +}; + +} +} +} + +#endif diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 908e359ea..73430ad23 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -30,69 +30,82 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test - : - [ run test_main.cpp - extents/test_extents_empty.cpp - extents/test_extents_size.cpp - extents/test_extents_at.cpp - extents/test_extents_access_operator.cpp - extents/test_extents_construction.cpp - extents/test_extents_assignment_operator.cpp - extents/test_extents_is_scalar.cpp - extents/test_extents_is_vector.cpp - extents/test_extents_is_matrix.cpp - extents/test_extents_is_tensor.cpp - extents/test_extents_is_valid.cpp - extents/test_extents_product.cpp - extents/test_extents_comparision.cpp - tensor/test_tensor_empty.cpp - tensor/test_tensor_size.cpp - tensor/test_tensor_at.cpp - tensor/test_tensor_access_operator.cpp - tensor/test_tensor_construction.cpp - tensor/test_tensor_assignment_operator.cpp - tensor/test_tensor_comparision.cpp - tensor/test_tensor_comparision_with_tensor_expression.cpp - tensor/test_tensor_comparision_with_scalar.cpp - tensor/test_tensor_operator_arithmetic_binary.cpp - tensor/test_tensor_operator_arithmetic_unary.cpp - tensor/test_tensor_operator_arithmetic_assignment.cpp - tensor/test_tensor_expression_evaluation_retrieve_extents.cpp - tensor/test_tensor_expression_evaluation_all_extents_equal.cpp - tensor/test_tensor_expression_access.cpp - tensor/test_tensor_unary_expression.cpp - tensor/test_tensor_binary_expression.cpp - algorithm/test_algorithm_copy.cpp - algorithm/test_algorithm_transform.cpp - algorithm/test_algorithm_accumulate.cpp - algorithm/test_algorithm_trans.cpp - multiplication/test_multiplication_mtv.cpp - multiplication/test_multiplication_mtm.cpp - multiplication/test_multiplication_ttv.cpp - multiplication/test_multiplication_ttm.cpp - multiplication/test_multiplication_ttt_permutation.cpp - multiplication/test_multiplication_inner.cpp - multiplication/test_multiplication_outer.cpp - multiplication/test_multiplication_ttt.cpp - functions/test_functions_vector.cpp - functions/test_functions_matrix.cpp - functions/test_functions_tensor.cpp - functions/test_functions_tensor_permutation.cpp - functions/test_functions_inner.cpp - functions/test_functions_norm.cpp - functions/test_functions_real_imag_conj.cpp - functions/test_functions_outer.cpp - functions/test_functions_trans.cpp - test_strides.cpp - test_multi_index.cpp - test_multi_index_utility.cpp - test_einstein_notation.cpp - unit_test_framework - : - : - : - : test_tensor - : - # + : # + [ run # + algorithm/test_algorithm_accumulate.cpp + 
algorithm/test_algorithm_copy.cpp + algorithm/test_algorithm_trans.cpp + algorithm/test_algorithm_transform.cpp + # + extents/test_extents_access_operator.cpp + extents/test_extents_assignment_operator.cpp + extents/test_extents_at.cpp + extents/test_extents_comparision.cpp + extents/test_extents_construction.cpp + extents/test_extents_empty.cpp + extents/test_extents_is_matrix.cpp + extents/test_extents_is_scalar.cpp + extents/test_extents_is_tensor.cpp + extents/test_extents_is_vector.cpp + extents/test_extents_is_valid.cpp + extents/test_extents_product.cpp + extents/test_extents_size.cpp + # + functions/test_functions_inner.cpp + functions/test_functions_matrix.cpp + functions/test_functions_norm.cpp + functions/test_functions_outer.cpp + functions/test_functions_real_imag_conj.cpp + functions/test_functions_tensor.cpp + functions/test_functions_tensor_permutation.cpp + functions/test_functions_trans.cpp + functions/test_functions_vector.cpp + # + multiplication/test_multiplication_inner.cpp + multiplication/test_multiplication_mtm.cpp + multiplication/test_multiplication_mtv.cpp + multiplication/test_multiplication_outer.cpp + multiplication/test_multiplication_ttm.cpp + multiplication/test_multiplication_ttt_permutation.cpp + multiplication/test_multiplication_ttt.cpp + multiplication/test_multiplication_ttv.cpp + # + tensor/test_tensor_access_operator.cpp + tensor/test_tensor_assignment_operator.cpp + tensor/test_tensor_at.cpp + tensor/test_tensor_binary_expression.cpp + tensor/test_tensor_comparision.cpp + tensor/test_tensor_comparision_with_scalar.cpp + tensor/test_tensor_comparision_with_tensor_expression.cpp + tensor/test_tensor_construction.cpp + tensor/test_tensor_empty.cpp + tensor/test_tensor_expression_access.cpp + tensor/test_tensor_expression_evaluation_all_extents_equal.cpp + tensor/test_tensor_expression_evaluation_retrieve_extents.cpp + tensor/test_tensor_operator_arithmetic_assignment.cpp + tensor/test_tensor_operator_arithmetic_binary.cpp + tensor/test_tensor_operator_arithmetic_unary.cpp + tensor/test_tensor_size.cpp + tensor/test_tensor_unary_expression.cpp + # + test_access.cpp + test_einstein_notation.cpp + test_main.cpp + test_multi_index.cpp + test_multi_index_utility.cpp + test_span.cpp + test_strides.cpp + test_subtensor.cpp + test_subtensor_utility.cpp + test_tensor.cpp + unit_test_framework + # + : + : + : + : test_tensor + : + # ] ; + diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp new file mode 100644 index 000000000..559482ba0 --- /dev/null +++ b/test/tensor/test_access.cpp @@ -0,0 +1,305 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#include +#include + +#include +#include +#include + +#include +#include + + +BOOST_AUTO_TEST_SUITE ( test_access_suite ) + + +using layout_types = std::tuple; + +//zip>::with_t; + +struct fixture { + using extents_t = boost::numeric::ublas::dynamic_extents<>; + using value_t = typename extents_t::value_type; + using multi_index_t = std::vector; + using index_t = value_t; + + fixture() + { + static_assert(shapes.size() == multi_index.size(),""); + static_assert(shapes.size() == indexf.size(),""); + static_assert(shapes.size() == indexl.size(),""); + static_assert(shapes.size() == ranks.size(),""); + + for(auto k = 0u; k < multi_index.size(); ++k){ + auto const& n = shapes[k]; + auto const r = ranks[k]; + assert( n.size() == r ); + for (auto const& i : multi_index[k]){ + assert( std::equal(i.begin(), i.end(), n.begin(), std::less<>{}) ) ; + } + } + } + + + static inline auto shapes = std::array + {{ + { }, + {1,1 }, + + {1,2 }, + {2,1 }, + {2,3 }, + + {2,3,1 }, + {1,2,3 }, + {3,1,2 }, + {3,2,4 }, + + {2,3,4,1}, + {1,2,3,4}, + {3,1,2,4}, + {3,2,4,5} + }}; + + static constexpr inline auto ranks = std::array + { 0,2,2,2,2,3,3,3,3,4,4,4,4 }; + + static inline auto multi_index = std::array,shapes.size()> + {{ + {{ { }, { }, { } }}, // 0 {} + {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} + + {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} + {{ {0,0 }, {1,0 }, {1,0 } }}, // 3 {2,1} + {{ {0,0 }, {1,1 }, {1,2 } }}, // 4 {2,3} + + {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 5 {2,3,1} + {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 6 {1,2,3} + {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 7 {3,1,2} + {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, // 8 {3,2,4} + + {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, // 9 {2,3,4,1} + {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //10 {1,2,3,4} + {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //11 {3,1,2,4} + {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //12 {3,2,4,5} + }}; + + static constexpr inline auto indexf = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 {1,2} + {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 3, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} + {{0, 3, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} + {{0, 3, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} + {{0, 4, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} + {{0,16, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} + {{0,15, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} + {{0,15, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} + {{0,16, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} + {{0,88,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} + }}; + + static constexpr inline auto indexl = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 {1,2} + {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1 }, {1,1} + {{0, 4, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} + {{0, 4, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} + {{0, 4, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} + {{0, 3, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} + {{0,14, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, 
{8,4,1} + {{0,18, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} + {{0,18, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} + {{0,14, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} + {{0,73,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} + }}; + + template + constexpr inline auto prodn(extents_type const& n) + { + return std::accumulate(n.begin(),n.end(),1ul, std::multiplies<>{}); + } + + // static constexpr inline auto const& e = shapes; + // static constexpr inline auto const& i = multi_indices; + + + // template struct x { static inline constexpr auto value = e[k][r]*x::value; }; + // template struct x { static inline constexpr auto value = 1; }; + // template struct x { static inline constexpr auto value = 1*x::value; }; + + // template struct y { static inline constexpr auto value = e[k][r ]*y::value; }; + // template struct y { static inline constexpr auto value = 1*y::value; }; + // template struct y { static inline constexpr auto value = e[k][p-1]; }; + + + // template static inline constexpr auto wf = x::value; + // template static inline constexpr auto wl = y::value; + + // template struct zf { static inline constexpr auto value = i[k][kk][r]*wf + zf::value; }; + // template struct zf<0,k,kk> { static inline constexpr auto value = i[k][kk][0]*wf; }; + + // template static inline constexpr auto c2 = zf<2,k,kk>::value; + // template static inline constexpr auto c3 = zf<3,k,kk>::value; + // template static inline constexpr auto c4 = zf<4,k,kk>::value; + + + +}; + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index, layout_t, layout_types, fixture ) +{ + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + using strides_t = ub::basic_strides; + + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); + }); + }); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_t, layout_types, fixture ) +{ + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + using strides_t = ub::basic_strides; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + constexpr auto r = std::get(ranks); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); + }); + }); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_types, fixture ) +{ + using namespace boost::numeric::ublas; + using strides_t = basic_strides; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + for(auto k = 0u; k < index.size(); ++k){ + auto const& n = shapes[k]; + auto const& iref = multi_index[k]; + auto const& w = strides_t(n); + auto const& jref = index[k]; + for(auto kk = 0u; kk < iref.size(); ++kk){ + auto const jj = jref[kk]; + auto const& ii = iref[kk]; + auto i = multi_index_t(w.size()); + //detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); + if constexpr ( is_first_order ) + detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); + else + detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); + + std::cout << "j= " << jj << std::endl; + std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "ii_= [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << std::endl; + + + + BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; + } + } +} + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t, layout_types, fixture ) +{ + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + using strides_t = ub::basic_strides; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? indexf : indexl; + + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& iref = std::get(multi_index); + auto const& jref = std::get(index); + auto const& w = strides_t(n); + constexpr auto r = std::get(ranks); + mp::mp_for_each>( [&]( auto K ) { + auto const jj = std::get(jref); + auto const& ii = std::get(iref); + auto i = multi_index_t(w.size()); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); + BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; + }); + }); +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_subtensor, layout_t, layout_types, fixture ) +{ + using namespace boost::numeric::ublas; + using strides_t = basic_strides; + + // subtensor the whole index-domain of a tensor + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + // subtensor the whole index-domain of a tensor + for(auto k = 0u; k < index.size(); ++k){ + auto const& n = shapes[k]; + auto const& w = strides_t(n); + auto const& jref = index[k]; + for(auto kk = 0u; kk < jref.size(); ++kk){ + auto const jj = jref[kk]; + auto const j = detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); + BOOST_CHECK_EQUAL ( j, jj ) ; + } + } +} + + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index e8c5aed19..2c4cace00 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -142,7 +142,7 @@ BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) struct fixture_shape { using shape = boost::numeric::ublas::basic_extents; - fixture_shape() : extents{ + fixture_shape() : extents{ shape{}, // 0 shape{1,1}, // 1 shape{1,2}, // 2 @@ -153,15 +153,15 @@ struct fixture_shape { shape{1,2,3}, // 7 shape{4,2,3}, // 8 shape{4,2,3,5} // 9 - } + } {} - std::vector extents; + std::vector extents; }; BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) { using namespace boost::numeric::ublas; - using span = sliced_span; + using span = sliced_span; // shape{} { From 64c661b0d86707e41d1147af7eac817ddceefed5 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 6 Jul 2020 08:24:39 +0200 Subject: [PATCH 4/9] stride creation changed. --- include/boost/numeric/ublas/tensor/access.hpp | 119 +++++--- .../ublas/tensor/detail/extents_functions.hpp | 278 +++++++++++------- .../numeric/ublas/tensor/dynamic_strides.hpp | 39 +-- .../numeric/ublas/tensor/multiplication.hpp | 6 - test/tensor/test_access.cpp | 106 ++++--- 5 files changed, 331 insertions(+), 217 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index e83e588c9..108c600cc 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -18,6 +18,16 @@ #include #include +#include + + +namespace boost::numeric::ublas { + +using first_order = column_major; +using last_order = row_major; + +} + namespace boost::numeric::ublas::detail{ @@ -57,65 +67,64 @@ constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 * @param wp end input iterator to a container with tensor or subtensor strides of length std::distance(begin,end) * @param i begin output iterator to a container with tensor or subtensor indices length std::distance(begin,end) or greater */ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, LayoutType l); + + template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, first_order ) { if(w == wp) return; - auto p = std::distance(w,wp); - auto kq = j; - //auto q = 0ul; + auto wr = std::make_reverse_iterator( w ); + auto wrp = std::make_reverse_iterator( wp ); + auto ir = std::make_reverse_iterator( i+std::distance(w,wp) ); - - for(int r = p-1; r >= 0; --r) - { - //q = l[r]-1; - i[r] = kq/w[r]; - kq -= w[r]*i[r]; - } - - //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); + std::transform(wrp,wr,ir, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); } - template -constexpr inline void compute_multi_index_first(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +constexpr inline void 
compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, last_order ) { if(w == wp) return; - auto p = std::distance(w,wp); - auto kq = j; - //auto q = 0ul; + std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} -// for(int r = p-1; r >= 0; --r) -// { -// //q = l[r]-1; -// i[r] = kq/w[r]; -// kq -= w[r]*i[r]; -// } - std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); -} -template -constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) -{ - if(w == wp) - return; - auto p = std::distance(w,wp); - auto kq = j; +//template +//constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +//{ +// if(w == wp) +// return; +//// for(unsigned r = 0ul; r < p; ++r) { +//// i[r] = kq/w[r]; +//// kq -= w[r]*i[r]; +//// } +// std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +//} - for(unsigned r = 0ul; r < p; ++r) { - i[r] = kq/w[r]; - kq -= w[r]*i[r]; - } +//template +//constexpr inline void compute_multi_index_first(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +//{ +// if(w == wp) +// return; - //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); -} +//// for(int r = p-1; r >= 0; --r) { +//// i[r] = kq/w[r]; +//// kq -= w[r]*i[r]; +//// } +// auto wr = std::make_reverse_iterator( w ); +// auto wrp = std::make_reverse_iterator( wp ); +// auto ir = std::make_reverse_iterator( i+std::distance(w,wp) ); + +// std::transform(wrp,wr,ir, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +//} /** @brief Computes a single index from a multi-index of a dense tensor or subtensor @@ -124,15 +133,39 @@ constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputI * @param w begin input iterator to a container with strides of length p * @param i begin input iterator to a container with indices of length p or greater */ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i, LayoutType); + + template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i, first_order o) { - if constexpr (p==0u) return; - else if constexpr (p >1u) {i[p-1]=j/w[p-1]; compute_multi_index(j-w[p-1]*i[p-1],w,w,i); } - else {i[p-1]=j/w[p-1]; } + if constexpr (p==0u) return; + else if constexpr (p >1u) {i[p-1]=j/w[p-1]; compute_multi_index(j-w[p-1]*i[p-1],w,w,i,o); } + else {i[p-1]=j/w[p-1]; } } + +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i, last_order o) +{ + if constexpr (p == 0u ) { return; } + else if constexpr (k+1 == p) {i[k]=j/w[k]; } + else {i[k]=j/w[k]; compute_multi_index(j-w[k]*i[k],w,w,i,o); } +} + + + +//template +//constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i) +//{ +// if constexpr (p == 0u ) return; +// else if constexpr (k+1 == p) {i[k]=j/w[k]; } +// else {i[k]=j/w[k]; compute_multi_index_last(j-w[k]*i[k],w,w,i); } +//} + + /** @brief Computes a single (relative memory) index of a dense tensor from a single index of one of its subtensor * * @param jv single index of a subtensor that is transformed into a single index of a dense tensor diff --git a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp index 4273829d1..479555e57 100644 
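// ---------------------------------------------------------------------------
// Minimal usage sketch for the layout-tagged index mapping above. Illustration
// only: it assumes <boost/numeric/ublas/tensor/access.hpp> can be included on
// its own, and it reuses the shape {3,2,4} with first-order strides {1,3,6}
// and last-order strides {8,4,1} from the fixture in test_access.cpp.
#include <boost/numeric/ublas/tensor/access.hpp>
#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  namespace ub = boost::numeric::ublas;
  std::vector<std::size_t> const wf = {1,3,6}; // first-order strides of shape {3,2,4}
  std::vector<std::size_t> const wl = {8,4,1}; // last-order  strides of shape {3,2,4}
  std::vector<std::size_t> const i  = {1,1,2}; // multi-index

  // multi-index -> single index: j = sum_r i[r]*w[r]
  auto const jf = ub::detail::compute_single_index(i.begin(), i.end(), wf.begin()); // 1*1+1*3+2*6 = 16
  auto const jl = ub::detail::compute_single_index(i.begin(), i.end(), wl.begin()); // 1*8+1*4+2*1 = 14

  // single index -> multi-index; the layout tag selects the unrolling order.
  std::vector<std::size_t> k(3);
  ub::detail::compute_multi_index(jf, wf.begin(), wf.end(), k.begin(), ub::first_order{});
  assert(k == i);
  ub::detail::compute_multi_index(jl, wl.begin(), wl.end(), k.begin(), ub::last_order{});
  assert(k == i);
  return 0;
}
// ---------------------------------------------------------------------------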
--- a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp @@ -23,43 +23,43 @@ namespace boost::numeric::ublas::detail{ - template - constexpr auto push_back(basic_static_extents) -> basic_static_extents; - - template - constexpr auto push_front(basic_static_extents) -> basic_static_extents; - - template - constexpr auto squeeze_impl_remove_one( basic_static_extents, basic_static_extents num = basic_static_extents{} ){ - // executed when basic_static_extents is size of 1 - // @code basic_static_extents @endcode - if constexpr( sizeof...(E) == 0ul ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - if constexpr( E0 == T(1) ){ - return num; - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - return decltype(push_back(num)){}; - } - }else{ - if constexpr( E0 == T(1) ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - return squeeze_impl_remove_one(basic_static_extents{}, num); - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - auto n_num_list = decltype(push_back(num)){}; - return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); - } - } +template +constexpr auto push_back(basic_static_extents) -> basic_static_extents; + +template +constexpr auto push_front(basic_static_extents) -> basic_static_extents; + +template +constexpr auto squeeze_impl_remove_one( basic_static_extents, basic_static_extents num = basic_static_extents{} ){ +// executed when basic_static_extents is size of 1 +// @code basic_static_extents @endcode +if constexpr( sizeof...(E) == 0ul ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + if constexpr( E0 == T(1) ){ + return num; + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + return decltype(push_back(num)){}; + } +}else{ + if constexpr( E0 == T(1) ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + return squeeze_impl_remove_one(basic_static_extents{}, num); + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + auto n_num_list = decltype(push_back(num)){}; + return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); } +} +} + +template +constexpr auto squeeze_impl( basic_static_extents const& e ){ - template - constexpr auto squeeze_impl( basic_static_extents const& e ){ - using extents_type = basic_static_extents; if constexpr( extents_type::_size <= typename extents_type::size_type(2) ){ @@ -75,7 +75,7 @@ namespace boost::numeric::ublas::detail{ // if same that means 1s does not exist and no need to // squeeze if constexpr( decltype(one_free_static_extents)::_size != extents_type::_size ){ - + // after squeezing, all the extents are 1s we need to // return extents of (1, 1) if constexpr( decltype(one_free_static_extents)::_size == size_type(0) ){ @@ -99,17 +99,17 @@ namespace boost::numeric::ublas::detail{ }else{ return e; } - - } +} - template - inline - constexpr auto squeeze_impl( basic_extents const& e ){ +template +[[nodiscard]] inline constexpr + auto squeeze_impl( basic_extents const& e ) +{ using extents_type = basic_extents; using base_type = typename extents_type::base_type; using value_type = typename extents_type::value_type; using size_type = typename extents_type::size_type; - + if( e.size() <= size_type(2) ){ return e; } @@ -120,36 +120,36 @@ 
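// Worked examples for the squeeze_impl branches above (illustrative only;
// shapes are written as extents value lists):
//   squeeze({1,4,1,3}) -> {4,3}   singleton dimensions are removed
//   squeeze({1,1,1})   -> {1,1}   an all-singleton shape collapses to the minimal rank-2 shape
//   squeeze({2,3})     -> {2,3}   shapes with size <= 2 are returned unchanged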
namespace boost::numeric::ublas::detail{ // count non one values size_type size = std::count_if(e.begin(), e.end(), not_one); - + // reserve space base_type n_extents( std::max(size, size_type(2)), 1 ); - + // copying non 1s to the new extents std::copy_if(e.begin(), e.end(), n_extents.begin(), not_one); // checking if extents size goes blow 2 // if size of extents goes to 1 // complying with GNU Octave - // if position 2 contains 1 we + // if position 2 contains 1 we // swap the pos if( size < size_type(2) && e[1] != value_type(1) ){ std::swap(n_extents[0], n_extents[1]); } - - return extents_type(n_extents); - } - template - inline - auto squeeze_impl( basic_fixed_rank_extents const& e ){ - if constexpr( N <= 2 ){ - return e; - }else{ - return squeeze_impl(basic_extents(e)); + return extents_type(n_extents); +} + +template +[[nodiscard]] inline constexpr + auto squeeze_impl( basic_fixed_rank_extents const& e ) +{ + if constexpr( N <= 2u ){ + return e; } - } - - + else{ + return squeeze_impl(basic_extents(e)); + } +} } // namespace boost::numeric::ublas::detail @@ -157,14 +157,19 @@ namespace boost::numeric::ublas { /** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ template -[[nodiscard]] inline -constexpr bool valid(ExtentsType const &e) { +[[nodiscard]] inline constexpr + bool valid(ExtentsType const &e) +{ - static_assert(is_extents_v, "boost::numeric::ublas::valid() : invalid type, type should be an extents"); + static_assert(is_extents_v, + "boost::numeric::ublas::valid() : " + "invalid type, type should be an extents"); - auto greater_than_zero = [](auto const& a){ return a > 0u; }; + auto greater_than_zero = [](auto a){ return a > 0u; }; - if( e.size() == 1u ) return e[0] == 1u; + if( e.size() == 1u ) { + return e[0] == 1u; + } return !e.empty() && std::all_of(e.begin(), e.end(), greater_than_zero ); } @@ -197,73 +202,143 @@ std::string to_string(T const &e) { return ss.str(); } + + +/** @brief Returns true if this has a matrix shape + * + * @returns true if (1,1,[1,...,1]) + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_scalar(InputIt first, InputIt last) +{ + return std::distance(first,last)>0u && + std::all_of (first,last,[](auto a){return a==1u;}); +} + + /** @brief Returns true if this has a scalar shape * * @returns true if (1,1,[1,...,1]) */ template -[[nodiscard]] inline -constexpr bool is_scalar(ExtentsType const &e) { +[[nodiscard]] inline constexpr + bool is_scalar(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_scalar() : " + "invalid type, type should be an extents"); + return is_scalar(e.begin(),e.end()); +} - static_assert(is_extents_v, "boost::numeric::ublas::is_scalar() : invalid type, type should be an extents"); - - auto equal_one = [](auto const &a) { return a == 1u; }; - return !e.empty() && std::all_of(e.begin(), e.end(), equal_one); + +/** @brief Returns true if this has a matrix shape + * + * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_vector(InputIt first, InputIt last) +{ + if (std::distance(first,last) == 0u) return false; + if (std::distance(first,last) == 1u) return *first > 1u; + + return std::any_of(first ,first+2, 
[](auto a){return a >1u;}) && + std::any_of(first ,first+2, [](auto a){return a==1u;}) && + std::all_of(first+2,last , [](auto a){return a==1u;}); } + /** @brief Returns true if this has a vector shape * * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 + * + * @param e extents with boost::numeric::ublas::is_valid(e) + * and supporting e.begin() and e.end() */ template -[[nodiscard]] inline -constexpr bool is_vector(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_vector() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; +[[nodiscard]] inline constexpr + bool is_vector(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_vector() : " + "invalid type, type should be an extents"); + + return is_vector(e.begin(),e.end()); +} - if (e.size() == 0u) return false; - else if (e.size() == 1u) return e[0] > 1u; - else return std::any_of(e.begin(), e.begin() + 2, greater_one) && - std::any_of(e.begin(), e.begin() + 2, equal_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); +/** @brief Returns true if this has a matrix shape + * + * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_matrix(InputIt first, InputIt last) +{ + return std::distance(first,last)>=2u && + std::all_of(first , first+2, [](auto a){return a >1u;}) && + std::all_of(first+2, last , [](auto a){return a==1u;}); } /** @brief Returns true if this has a matrix shape * * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + * + * @param e extents with boost::numeric::ublas::is_valid(e) + * and supporting e.begin() and e.end() */ template -[[nodiscard]] inline -constexpr bool is_matrix(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_matrix() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; +[[nodiscard]] inline constexpr + bool is_matrix(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_matrix() : " + "invalid type, type should be an extents"); + return is_matrix(e.begin(),e.end()); +} - return ( e.size() >= 2u ) && std::all_of(e.begin(), e.begin() + 2, greater_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); +/** @brief Returns true if this is has a tensor shape + * + * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_tensor(InputIt first, InputIt last) +{ + return std::distance(first,last)>=3u && + std::any_of(first+2, last, [](auto a){return a>1u;}); } /** @brief Returns true if this is has a tensor shape * * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() + * + * @param e extents with boost::numeric::ublas::is_valid(e) + * supporting e.begin() and e.end() */ template -[[nodiscard]] inline -constexpr bool is_tensor(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_tensor() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { 
return a > 1u;}; - - return ( e.size() >= 3u ) && std::any_of(e.begin() + 2, e.end(), greater_one); +[[nodiscard]] inline constexpr + bool is_tensor(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_tensor() : " + "invalid type, type should be an extents"); + return is_tensor(e.begin(),e.end()); } /** @brief Eliminates singleton dimensions when size > 2 @@ -280,11 +355,12 @@ constexpr bool is_tensor(ExtentsType const &e) { */ template [[nodiscard]] inline -auto squeeze(ExtentsType const &e) { - + auto squeeze(ExtentsType const &e) +{ + static_assert(is_extents_v, "boost::numeric::ublas::squeeze() : invalid type, type should be an extents"); - return detail::squeeze_impl(e); + return detail::squeeze_impl(e); } /** @brief Returns the product of extents */ diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp index cdc1840f4..45001f179 100644 --- a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp +++ b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp @@ -74,35 +74,38 @@ class basic_strides * */ template - basic_strides(basic_extents const& s) - : _base(s.size(),1) + basic_strides(basic_extents const& n) + : _base(n.size(),1) { - if( s.empty() ) + if( n.empty() ) return; - if( !valid(s) ) + if( !valid(n) ) throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : shape is not valid."); - if( is_vector(s) || is_scalar(s) ) /* */ - return; +// if( is_vector(s) || is_scalar(s) ) /* */ +// return; + + const auto p = this->size(); - if( this->size() < 2 ) + if( p < 2 ) throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : size of strides must be greater or equal 2."); + assert(p >= 2u); + auto& w = _base; - if constexpr (std::is_same::value){ - assert(this->size() >= 2u); - size_type k = 1ul, kend = this->size(); - for(; k < kend; ++k) - _base[k] = _base[k-1] * s[k-1]; + auto q = base_type(_base.size()); + if( std::is_same_v){ + std::iota(q.begin(), q.end(), 0u); } - else { - assert(this->size() >= 2u); - size_type k = this->size()-2, kend = 0ul; - for(; k > kend; --k) - _base[k] = _base[k+1] * s[k+1]; - _base[0] = _base[1] * s[1]; + else{ + std::iota(q.rbegin(), q.rend(), 0u); } + + w[ q[0] ] = 1u; + for(auto k = 1u; k < p; ++k) + w[ q[k] ] = w[ q[k-1] ] * n [ q[k-1] ]; + } basic_strides(basic_strides const& l) diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index 6a9c0613b..e2d94f7be 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -645,12 +645,6 @@ void ttv(SizeType const m, SizeType const p, } } - for(auto i = m; i < p; ++i){ - if(na[i] != nc[i-1]){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - } - } - const auto max = std::max(nb[0], nb[1]); if( na[m-1] != max){ throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index 559482ba0..57ea432f9 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -51,13 +51,15 @@ struct fixture { } - static inline auto shapes = std::array + static inline auto shapes = std::array {{ { }, {1,1 }, {1,2 }, + {1,4 }, {2,1 }, + {4,1 }, {2,3 }, {2,3,1 }, @@ -72,7 +74,7 @@ struct fixture { }}; static constexpr inline auto 
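// Note (illustrative, following the basic_strides constructor above): the
// stride values quoted in the comments of this fixture are obtained from a
// shape n of rank p by
//   first-order : w[0]   = 1,  w[k]   = w[k-1] * n[k-1]
//   last-order  : w[p-1] = 1,  w[k-1] = w[k]   * n[k]
// e.g. n = {3,2,4} gives w = {1,3,6} (first-order) and w = {8,4,1} (last-order).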
ranks = std::array - { 0,2,2,2,2,3,3,3,3,4,4,4,4 }; + { 0,2,2,2,2,2,2,3,3,3,3,4,4,4,4 }; static inline auto multi_index = std::array,shapes.size()> {{ @@ -80,52 +82,58 @@ struct fixture { {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} - {{ {0,0 }, {1,0 }, {1,0 } }}, // 3 {2,1} - {{ {0,0 }, {1,1 }, {1,2 } }}, // 4 {2,3} - - {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 5 {2,3,1} - {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 6 {1,2,3} - {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 7 {3,1,2} - {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, // 8 {3,2,4} - - {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, // 9 {2,3,4,1} - {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //10 {1,2,3,4} - {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //11 {3,1,2,4} - {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //12 {3,2,4,5} + {{ {0,0 }, {0,2 }, {0,3 } }}, // 3 {1,4} + {{ {0,0 }, {1,0 }, {1,0 } }}, // 4 {2,1} + {{ {0,0 }, {2,0 }, {3,0 } }}, // 5 {4,1} + {{ {0,0 }, {1,1 }, {1,2 } }}, // 6 {2,3} + + {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 7 {2,3,1} + {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 8 {1,2,3} + {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 9 {3,1,2} + {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, //10 {3,2,4} + + {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, //11 {2,3,4,1} + {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //12 {1,2,3,4} + {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //13 {3,1,2,4} + {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //14 {3,2,4,5} }}; static constexpr inline auto indexf = std::array,shapes.size()> {{ - {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 0 {} {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 {1,2} - {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} - {{0, 3, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} - {{0, 3, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} - {{0, 3, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} - {{0, 4, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} - {{0,16, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} - {{0,15, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} - {{0,15, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} - {{0,16, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} - {{0,88,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 3, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} + {{0, 3, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} + {{0, 3, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} + {{0, 4, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} + {{0,16, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} + {{0,15, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} + {{0,15, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} + {{0,16, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} + {{0,88,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} }}; static constexpr inline auto indexl = std::array,shapes.size()> {{ {{0, 0, 0}}, // 0 {} {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 {1,2} - {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1 }, {1,1} - {{0, 4, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} - {{0, 4, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, 
{3,1,1} - {{0, 4, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} - {{0, 3, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} - {{0,14, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} - {{0,18, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} - {{0,18, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} - {{0,14, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} - {{0,73,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 4, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} + {{0, 4, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} + {{0, 4, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} + {{0, 3, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} + {{0,14, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} + {{0,18, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} + {{0,18, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} + {{0,14, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} + {{0,73,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} }}; template @@ -228,18 +236,18 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_ty auto const jj = jref[kk]; auto const& ii = iref[kk]; auto i = multi_index_t(w.size()); - //detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); - if constexpr ( is_first_order ) - detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); - else - detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); + detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); +// if constexpr ( is_first_order ) +// detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); +// else +// detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); - std::cout << "j= " << jj << std::endl; - std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << "ii_= [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << std::endl; +// std::cout << "j= " << jj << std::endl; +// std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "ii_ref = [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << std::endl; @@ -268,7 +276,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t auto const jj = std::get(jref); auto const& ii = std::get(iref); auto i = multi_index_t(w.size()); - ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), 
i.begin(), layout_t{}); BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; }); }); From ae385d40c61d591930d192d84609658482df8158 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Fri, 28 May 2021 15:09:58 +0200 Subject: [PATCH 5/9] fix tesor-vector multiplication for subtensor. --- IDEs/qtcreator/test/test_tensor.pro | 4 +- include/boost/numeric/ublas/tensor/access.hpp | 26 +- .../boost/numeric/ublas/tensor/concepts.hpp | 1 + .../tensor/extents/extents_functions.hpp | 11 +- .../tensor/function/tensor_times_vector.hpp | 3 +- include/boost/numeric/ublas/tensor/span.hpp | 27 +- .../boost/numeric/ublas/tensor/subtensor.hpp | 209 +++++----- .../ublas/tensor/subtensor_utility.hpp | 78 ++-- include/boost/numeric/ublas/tensor/tags.hpp | 16 +- test/tensor/test_access.cpp | 371 +++++++++--------- test/tensor/test_strides.cpp | 1 + test/tensor/test_subtensor.cpp | 259 ++++++------ test/tensor/test_subtensor_utility.cpp | 85 ++-- 13 files changed, 536 insertions(+), 555 deletions(-) diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 1c6c216c3..8fc57025f 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -5,13 +5,11 @@ CONFIG += staticlib depend_includepath console CONFIG -= qt CONFIG += c++20 -CONFIG += c++17 - #QMAKE_CXXFLAGS += -fno-inline QMAKE_CXXFLAGS =-std=c++20 QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra QMAKE_CXXFLAGS +=-Wno-unknown-pragmas -QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable +#QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable gcc:QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index 108c600cc..525282fd9 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,8 +10,8 @@ // -#ifndef _BOOST_UBLAS_TENSOR_ACCESS_HPP -#define _BOOST_UBLAS_TENSOR_ACCESS_HPP +#ifndef BOOST_UBLAS_TENSOR_ACCESS_HPP +#define BOOST_UBLAS_TENSOR_ACCESS_HPP #include @@ -26,7 +26,7 @@ namespace boost::numeric::ublas { using first_order = column_major; using last_order = row_major; -} +} // namespace boost::numeric::ublas namespace boost::numeric::ublas::detail{ @@ -68,11 +68,21 @@ constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 * @param i begin output iterator to a container with tensor or subtensor indices length std::distance(begin,end) or greater */ template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, LayoutType l); +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, LayoutType /*unused*/); +//{ +// if(w == wp) +// return; + +// auto wr = std::make_reverse_iterator( w ); +// auto wrp = std::make_reverse_iterator( wp ); +// auto ir = std::make_reverse_iterator( i+std::distance(w,wp) ); + +// std::transform(wrp,wr,ir, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +//} template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, first_order ) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, first_order /*unused*/) { if(w == wp) return; @@ -85,7 +95,7 @@ constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp } template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, last_order ) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, last_order /*unused*/) { if(w == wp) return; @@ -182,6 +192,6 @@ constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 ); } -} // namespace +} // namespace boost::numeric::ublas::detail #endif diff --git a/include/boost/numeric/ublas/tensor/concepts.hpp b/include/boost/numeric/ublas/tensor/concepts.hpp index 70820484a..1a293554c 100644 --- a/include/boost/numeric/ublas/tensor/concepts.hpp +++ b/include/boost/numeric/ublas/tensor/concepts.hpp @@ -15,6 +15,7 @@ #include + namespace boost::numeric::ublas{ template diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp index 85e64ff8f..149e12bde 100644 --- a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -153,9 +153,12 @@ template { auto s = typename extents_core::base_type(e.size(),1ul); - if(empty(e) || is_vector(e) || is_scalar(e)){ + if(empty(e) || is_scalar(e)){ return s; } + + // || is_vector(e) + if constexpr(std::is_same_v){ std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); } else { @@ -170,9 +173,13 @@ template auto s = typename extents_core::base_type{}; std::fill(s.begin(),s.end(),1ul); - if(empty(e) || is_vector(e) || is_scalar(e)){ + if(empty(e) || is_scalar(e)){ return s; } + + + // || is_vector(e) + if constexpr(std::is_same_v){ std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); } else { diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp index 82c9b3c41..5702f97fb 100644 --- 
a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp @@ -94,9 +94,8 @@ inline decltype(auto) prod( tensor_core< TE > const &a, vector const &b, c nc_base[j++] = na.at(i); auto nc = shape(nc_base); - - auto c = tensor( nc, value{} ); + auto const* bb = &(b(0)); ttv(m, p, c.data(), c.extents().data(), c.strides().data(), diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index f0875b60c..581e16dc6 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -10,26 +10,23 @@ // -#ifndef _BOOST_UBLAS_TENSOR_SPAN_ -#define _BOOST_UBLAS_TENSOR_SPAN_ +#ifndef BOOST_UBLAS_TENSOR_SPAN +#define BOOST_UBLAS_TENSOR_SPAN #include #include #include #include -namespace boost { -namespace numeric { -namespace ublas { -namespace tag { + +#include "concepts.hpp" + +namespace boost::numeric::ublas::tag{ struct sliced {}; struct strided {}; -} -} -} -} +} // namespace boost::numeric::ublas::tag namespace boost::numeric::ublas { @@ -53,7 +50,7 @@ template class span; -static constexpr inline std::size_t end = std::numeric_limits::max(); +static constexpr inline std::size_t max = std::numeric_limits::max(); template<> class span @@ -210,14 +207,14 @@ class span : using sliced_span = span; -template -inline auto ran(unsigned_type f, unsigned_type l) +template +inline auto ran(unsigned_type_left f, unsigned_type_right l) { return sliced_span(f,l); } -template -inline auto ran(unsigned_type f, unsigned_type s, unsigned_type l) +template +inline auto ran(unsigned_type_left f, unsigned_type_middle s, unsigned_type_right l) { return strided_span(f,s,l); } diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp index 5b07e9c5f..e8a150d16 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,32 +12,19 @@ /// \file subtensor.hpp Definition for the tensor template class -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP - - -#include -#include -#include -#include -#include +#include "tensor.hpp" +#include "subtensor_utility.hpp" +#include "extents.hpp" +#include "span.hpp" +#include "expression.hpp" namespace boost::numeric::ublas { -template -class dynamic_tensor; - -template -class matrix; - -template -class vector; - - - /** @brief A view of a dense tensor of values of type \c T. @@ -61,63 +48,63 @@ class subtensor; * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. 
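// Illustrative construction of sliced and strided spans (comment sketch only;
// ran, max, sliced_span and strided_span are the declarations from span.hpp
// above, and the bounds are inclusive index ranges [first,last] as assumed by
// transform_span in subtensor_utility.hpp):
//   auto a = ran(1, 3);     // sliced_span(1,3)    -> indices 1,2,3
//   auto b = ran(0, 2, 6);  // strided_span(0,2,6) -> indices 0,2,4,6
//   auto c = ran(2, max);   // from index 2 up to the last index of that dimension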
\c and \c std::vector can also be used */ template -class subtensor > +class subtensor > : public detail::tensor_expression< - subtensor> , - subtensor> > + subtensor> , + subtensor> > { - static_assert( std::is_same::value || std::is_same::value, - "boost::numeric::tensor template class only supports first- or last-order storage formats."); + static_assert( std::is_same::value || std::is_same::value, + "boost::numeric::tensor template class only supports first- or last-order storage formats."); - using tensor_type = dynamic_tensor; - using self_type = subtensor; + using tensor_type = tensor_dynamic; + using self_type = subtensor; public: - using domain_tag = tag::sliced; + using domain_tag = tag::sliced; - using span_type = span; + using span_type = span; - template - using tensor_expression_type = detail::tensor_expression; + template + using tensor_expression_type = detail::tensor_expression; - template - using matrix_expression_type = matrix_expression; + template + using matrix_expression_type = matrix_expression; - template - using vector_expression_type = vector_expression; + template + using vector_expression_type = vector_expression; - using super_type = tensor_expression_type; + using super_type = tensor_expression_type; -// static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); + // static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); - using array_type = typename tensor_type::array_type; - using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using layout_type = typename tensor_type::layout_type; - using size_type = typename tensor_type::size_type; - using difference_type = typename tensor_type::difference_type; - using value_type = typename tensor_type::value_type; + using size_type = typename tensor_type::size_type; + using difference_type = typename tensor_type::difference_type; + using value_type = typename tensor_type::value_type; - using reference = typename tensor_type::reference; - using const_reference = typename tensor_type::const_reference; + using reference = typename tensor_type::reference; + using const_reference = typename tensor_type::const_reference; - using pointer = typename tensor_type::pointer; - using const_pointer = typename tensor_type::const_pointer; + using pointer = typename tensor_type::pointer; + using const_pointer = typename tensor_type::const_pointer; -// using iterator = typename array_type::iterator; -// using const_iterator = typename array_type::const_iterator; + // using iterator = typename array_type::iterator; + // using const_iterator = typename array_type::const_iterator; -// using reverse_iterator = typename array_type::reverse_iterator; -// using const_reverse_iterator = typename array_type::const_reverse_iterator; + // using reverse_iterator = typename array_type::reverse_iterator; + // using const_reverse_iterator = typename array_type::const_reverse_iterator; - using tensor_temporary_type = self_type; - using storage_category = dense_tag; + using tensor_temporary_type = self_type; + using storage_category = dense_tag; - using strides_type = basic_strides; - using extents_type = basic_extents; + using extents_type = extents<>; + using strides_type = typename extents_type::base_type; - using matrix_type = matrix; - using vector_type = vector; + using matrix_type = matrix; + using vector_type = vector; @@ -143,7 +130,7 @@ class subtensor > : super_type () , spans_ 
(detail::generate_span_vector(t.extents(),std::forward(spans)...)) , extents_ (detail::compute_extents(spans_)) - , strides_ (extents_) + , strides_ (ublas::to_strides(extents_,layout_type{})) , span_strides_ (detail::compute_span_strides(t.strides(),spans_)) , data_ {t.data() + detail::compute_offset(t.strides(), spans_)} { @@ -381,36 +368,36 @@ class subtensor > #endif - /** @brief Returns true if the subtensor is empty (\c size==0) */ - inline bool empty () const { - return this->size() == 0ul; - } +// /** @brief Returns true if the subtensor is empty (\c size==0) */ +// inline bool empty () const { +// return this->size() == 0ul; +// } - /** @brief Returns the size of the subtensor */ - inline size_type size () const { - return product(this->extents_); - } +// /** @brief Returns the size of the subtensor */ +// inline size_type size () const { +// return product(this->extents_); +// } - /** @brief Returns the size of the subtensor */ - inline size_type size (size_type r) const { - return this->extents_.at(r); - } +// /** @brief Returns the size of the subtensor */ +// inline size_type size (size_type r) const { +// return this->extents_.at(r); +// } - /** @brief Returns the number of dimensions/modes of the subtensor */ - inline size_type rank () const { - return this->extents_.size(); - } +// /** @brief Returns the number of dimensions/modes of the subtensor */ +// inline size_type rank () const { +// return this->extents_.size(); +// } - /** @brief Returns the number of dimensions/modes of the subtensor */ - inline size_type order () const { - return this->extents_.size(); - } +// /** @brief Returns the number of dimensions/modes of the subtensor */ +// inline size_type order () const { +// return this->extents_.size(); +// } - /** @brief Returns the strides of the subtensor */ - inline auto const& strides () const { - return this->strides_; - } +// /** @brief Returns the strides of the subtensor */ +// inline auto const& strides () const { +// return this->strides_; +// } /** @brief Returns the span strides of the subtensor */ inline auto const& span_strides () const { @@ -423,21 +410,34 @@ class subtensor > } - /** @brief Returns the extents of the subtensor */ - inline auto const& extents () const { - return this->extents_; - } +// /** @brief Returns the extents of the subtensor */ +// inline auto const& extents() const { +// return this->extents_; +// } - /** @brief Returns a \c const reference to the container. */ - inline const_pointer data () const { - return this->data_; - } + [[nodiscard]] inline auto empty () const noexcept { return this->size() == 0ul; } + [[nodiscard]] inline auto size () const noexcept { return product(this->extents_); } + [[nodiscard]] inline auto size (size_type r) const { return extents_.at(r); } + [[nodiscard]] inline auto rank () const { return extents_.size(); } + [[nodiscard]] inline auto order () const { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return strides_; } + [[nodiscard]] inline auto const& getExtents () const noexcept { return extents_; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return data_;} + [[nodiscard]] inline auto data () noexcept -> pointer { return data_;} +// [[nodiscard]] inline auto const& base () const noexcept { return _container; } - /** @brief Returns a \c const reference to the container. */ - inline pointer data () { - return this->data_; - } + +// /** @brief Returns a \c const reference to the container. 
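// Worked example for the members spans_, extents_, strides_, span_strides_ and
// data_ initialised in the constructor above (illustrative; assumes a rank-2
// tensor with extents {4,3}, first-order strides {1,4}, and the inclusive
// spans ran(1,3) and ran(0,1)):
//   spans_        : { [1,3], [0,1] }
//   extents_      : {3,2}              (the span sizes)
//   strides_      : {1,3}              (first-order strides of the subtensor itself)
//   span_strides_ : {1*1, 4*1} = {1,4} (tensor stride times span step)
//   data_         : t.data() + 1*1 + 0*4 = t.data() + 1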
*/ +// inline const_pointer data() const { +// return this->data_; +// } + +// /** @brief Returns a \c const reference to the container. */ +// inline pointer data () { +// return this->data_; +// } @@ -660,20 +660,6 @@ class subtensor > } -#if 0 - // ------------- - // Serialization - // ------------- - - /// Serialize a tensor into and archive as defined in Boost - /// \param ar Archive object. Can be a flat file, an XML file or any other stream - /// \param file_version Optional file version (not yet used) - template - void serialize(Archive & ar, const unsigned int /* file_version */){ - ar & serialization::make_nvp("data",data_); - } -#endif - #endif private: @@ -686,11 +672,6 @@ class subtensor > }; -} // namespaces - - - - - +} // namespaces boost::numeric::ublas #endif diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 6c42763d0..4c38be404 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -19,9 +19,9 @@ #include #include -#include -#include -#include +#include "span.hpp" +#include "extents.hpp" +#include "tags.hpp" namespace boost::numeric::ublas::detail { @@ -36,19 +36,18 @@ namespace boost::numeric::ublas::detail { * @param[in] strides strides of the tensor, the subtensor refers to * @param[in] spans vector of spans of the subtensor */ -template -auto compute_span_strides(strides_type const& strides, spans_type const& spans) +template +auto compute_span_strides(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) throw std::runtime_error("Error in boost::numeric::ublas::subtensor::compute_span_strides(): tensor strides.size() != spans.size()"); - using base_type = typename strides_type::base_type; - auto span_strides = base_type(spans.size()); + auto span_strides = std::vector(spans.size()); std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), [](auto w, auto const& s) { return w * s.step(); } ); - return strides_type( span_strides ); + return std::vector( span_strides ); } /*! 
@brief Computes the data pointer offset for a subtensor @@ -60,16 +59,14 @@ auto compute_span_strides(strides_type const& strides, spans_type const& spans) * @param[in] strides strides of the tensor, the subtensor refers to * @param[in] spans vector of spans of the subtensor */ -template -auto compute_offset(strides_type const& strides, spans_type const& spans) +template +auto compute_offset(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); - using value_type = typename strides_type::value_type; - - return std::inner_product(spans.begin(), spans.end(), strides.begin(), value_type(0), - std::plus(), [](auto const& s, value_type w) {return s.first() * w; } ); + return std::inner_product(spans.begin(), spans.end(), strides.begin(), Size(0), + std::plus(), [](auto const& s, Size w) {return s.first() * w; } ); } @@ -82,7 +79,7 @@ auto compute_offset(strides_type const& strides, spans_type const& spans) template auto compute_extents(spans_type const& spans) { - using extents_t = basic_extents; + using extents_t = extents<>; using base_type = typename extents_t::base_type; if(spans.empty()) return extents_t{}; @@ -106,13 +103,13 @@ auto compute_extents(spans_type const& spans) * @param[in] extent extent that is maybe used for the tranformation */ template -auto transform_span(span const& s, size_type const extent) +auto transform_span(span const& s, std::size_t const extent) { using span_type = span; - size_type first = s.first(); - size_type last = s.last (); - size_type size = s.size (); + std::size_t first = s.first(); + std::size_t last = s.last (); + std::size_t size = s.size (); auto const extent0 = extent-1; @@ -121,41 +118,42 @@ auto transform_span(span const& s, size_type const extent) if constexpr ( is_sliced ){ if(size == 0) return span_type(0 , extent0); - else if(first== end) return span_type(extent0 , extent0); - else if(last == end) return span_type(first , extent0); + else if(first== max) return span_type(extent0 , extent0); + else if(last == max) return span_type(first , extent0); else return span_type(first , last ); } else { size_type step = s.step (); if(size == 0) return span_type(0 , size_type(1), extent0); - else if(first== end) return span_type(extent0 , step, extent0); - else if(last == end) return span_type(first , step, extent0); + else if(first== max) return span_type(extent0 , step, extent0); + else if(last == max) return span_type(first , step, extent0); else return span_type(first , step, last ); } + return span_type{}; } -template -void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... spans ); +template +void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); -template -void transform_spans_impl(basic_extents const& extents, std::array& span_array, span const& s, span_types&& ... spans) +template +void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... 
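// Examples of the mapping performed by transform_span above for a sliced span
// and a dimension of extent 5 (illustrative only):
//   span(1,3)   -> span(1,3)   explicit bounds are kept
//   span(2,max) -> span(2,4)   max is replaced by the last valid index (extent-1)
//   span()      -> span(0,4)   an empty span selects the whole dimension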
spans) { std::get(span_array) = transform_span(s, extents[r]); static constexpr auto nspans = sizeof...(spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans)...); + transform_spans_impl(extents, span_array, std::forward(spans)...); } -template -void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... spans ) +template +void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) { - static constexpr auto nspans = sizeof...(spans); + static constexpr auto nspans = sizeof...(Spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - std::get(span_array) = transform_span(span_type(arg), extents[r]); + std::get(span_array) = transform_span(Span(arg), extents[r]); if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans) ... ); + transform_spans_impl(extents, span_array, std::forward(spans) ... ); } @@ -170,15 +168,15 @@ void transform_spans_impl (basic_extents const& extents, std::array -auto generate_span_array(basic_extents const& extents, span_types&& ... spans) +template +auto generate_span_array(extents<> const& extents, Spans&& ... spans) { - constexpr static auto n = sizeof...(spans); + constexpr static auto n = sizeof...(Spans); if(extents.size() != n) throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); std::array span_array; if constexpr (n>0) - transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); return span_array; } @@ -193,10 +191,10 @@ auto generate_span_array(basic_extents const& extents, span_types&& . * @param[in] extents of the tensor * @param[in] spans spans with which the subtensor is created */ -template -auto generate_span_vector(basic_extents const& extents, span_types&& ... spans) +template +auto generate_span_vector(extents<> const& extents, Spans&& ... 
spans) { - auto span_array = generate_span_array(extents,std::forward(spans)...); + auto span_array = generate_span_array(extents,std::forward(spans)...); return std::vector(span_array.begin(), span_array.end()); } diff --git a/include/boost/numeric/ublas/tensor/tags.hpp b/include/boost/numeric/ublas/tensor/tags.hpp index 7774f9ccb..55bb6d084 100644 --- a/include/boost/numeric/ublas/tensor/tags.hpp +++ b/include/boost/numeric/ublas/tensor/tags.hpp @@ -12,17 +12,11 @@ #define BOOST_UBLAS_TENSOR_TAGS_HPP namespace boost::numeric::ublas{ - - struct tensor_tag{}; - - struct storage_resizable_container_tag{}; - - struct storage_static_container_tag{}; - - struct storage_seq_container_tag{}; - - struct storage_non_seq_container_tag{}; - +struct tensor_tag{}; +struct storage_resizable_container_tag{}; +struct storage_static_container_tag{}; +struct storage_seq_container_tag{}; +struct storage_non_seq_container_tag{}; } // namespace boost::numeric::ublas diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index 57ea432f9..dd0b08607 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,8 +12,7 @@ #include #include -#include -#include +#include #include #include @@ -27,143 +26,145 @@ using layout_types = std::tuple>::with_t; -struct fixture { - using extents_t = boost::numeric::ublas::dynamic_extents<>; - using value_t = typename extents_t::value_type; - using multi_index_t = std::vector; - using index_t = value_t; - - fixture() - { - static_assert(shapes.size() == multi_index.size(),""); - static_assert(shapes.size() == indexf.size(),""); - static_assert(shapes.size() == indexl.size(),""); - static_assert(shapes.size() == ranks.size(),""); - - for(auto k = 0u; k < multi_index.size(); ++k){ - auto const& n = shapes[k]; - auto const r = ranks[k]; - assert( n.size() == r ); - for (auto const& i : multi_index[k]){ - assert( std::equal(i.begin(), i.end(), n.begin(), std::less<>{}) ) ; - } - } - } - +struct fixture +{ - static inline auto shapes = std::array - {{ - { }, - {1,1 }, - - {1,2 }, - {1,4 }, - {2,1 }, - {4,1 }, - {2,3 }, - - {2,3,1 }, - {1,2,3 }, - {3,1,2 }, - {3,2,4 }, - - {2,3,4,1}, - {1,2,3,4}, - {3,1,2,4}, - {3,2,4,5} - }}; - - static constexpr inline auto ranks = std::array - { 0,2,2,2,2,2,2,3,3,3,3,4,4,4,4 }; - - static inline auto multi_index = std::array,shapes.size()> - {{ - {{ { }, { }, { } }}, // 0 {} - {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} - - {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} - {{ {0,0 }, {0,2 }, {0,3 } }}, // 3 {1,4} - {{ {0,0 }, {1,0 }, {1,0 } }}, // 4 {2,1} - {{ {0,0 }, {2,0 }, {3,0 } }}, // 5 {4,1} - {{ {0,0 }, {1,1 }, {1,2 } }}, // 6 {2,3} - - {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 7 {2,3,1} - {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 8 {1,2,3} - {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 9 {3,1,2} - {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, //10 {3,2,4} - - {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, //11 {2,3,4,1} - {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //12 {1,2,3,4} - {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //13 {3,1,2,4} - {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //14 {3,2,4,5} - }}; - - static constexpr inline auto indexf = std::array,shapes.size()> - {{ - {{0, 0, 0}}, // 0 {} - {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} - {{0, 2, 3}}, // 3 { 
{0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} - {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} - {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} - {{0, 3, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} - {{0, 3, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} - {{0, 3, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} - {{0, 4, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} - {{0,16, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} - {{0,15, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} - {{0,15, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} - {{0,16, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} - {{0,88,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} - }}; - - static constexpr inline auto indexl = std::array,shapes.size()> - {{ - {{0, 0, 0}}, // 0 {} - {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} - {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} - {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} - {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} - {{0, 4, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} - {{0, 4, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} - {{0, 4, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} - {{0, 3, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} - {{0,14, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} - {{0,18, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} - {{0,18, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} - {{0,14, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} - {{0,73,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} - }}; - - template - constexpr inline auto prodn(extents_type const& n) - { - return std::accumulate(n.begin(),n.end(),1ul, std::multiplies<>{}); + using extents_t = boost::numeric::ublas::extents<>; + using value_t = typename extents_t::value_type; + using multi_index_t = std::vector; + using index_t = value_t; + + fixture() + { + static_assert(shapes.size() == multi_index.size(),""); + static_assert(shapes.size() == indexf.size(),""); + static_assert(shapes.size() == indexl.size(),""); + static_assert(shapes.size() == ranks.size(),""); + + for(auto k = 0u; k < multi_index.size(); ++k){ + auto const& n = shapes[k]; + auto const r = ranks[k]; + assert( n.size() == r ); + for (auto const& i : multi_index[k]){ + assert( std::equal(i.begin(), i.end(), boost::numeric::ublas::begin(n), std::less<>{}) ) ; + } } - - // static constexpr inline auto const& e = shapes; - // static constexpr inline auto const& i = multi_indices; - - - // template struct x { static inline constexpr auto value = e[k][r]*x::value; }; - // template struct x { static inline constexpr auto value = 1; }; - // template struct x { static inline constexpr auto value = 1*x::value; }; - - // template struct y { static inline constexpr auto value = e[k][r ]*y::value; }; - // template struct y { static inline constexpr auto value = 1*y::value; }; - // template struct y { static inline constexpr auto value = e[k][p-1]; }; - - - // template static inline constexpr auto wf = x::value; - // template static inline constexpr auto wl = y::value; - - // template struct zf { static inline constexpr auto value = i[k][kk][r]*wf + zf::value; }; - // template 
struct zf<0,k,kk> { static inline constexpr auto value = i[k][kk][0]*wf; }; - - // template static inline constexpr auto c2 = zf<2,k,kk>::value; - // template static inline constexpr auto c3 = zf<3,k,kk>::value; - // template static inline constexpr auto c4 = zf<4,k,kk>::value; + } + + + static inline auto shapes = std::array + {{ + { }, + {1,1 }, + + {1,2 }, + {1,4 }, + {2,1 }, + {4,1 }, + {2,3 }, + + {2,3,1 }, + {1,2,3 }, + {3,1,2 }, + {3,2,4 }, + + {2,3,4,1}, + {1,2,3,4}, + {3,1,2,4}, + {3,2,4,5} + }}; + + static constexpr inline auto ranks = std::array + { 0,2,2,2,2,2,2,3,3,3,3,4,4,4,4 }; + + static inline auto multi_index = std::array,shapes.size()> + {{ + {{ { }, { }, { } }}, // 0 {} + {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} + + {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} + {{ {0,0 }, {0,2 }, {0,3 } }}, // 3 {1,4} + {{ {0,0 }, {1,0 }, {1,0 } }}, // 4 {2,1} + {{ {0,0 }, {2,0 }, {3,0 } }}, // 5 {4,1} + {{ {0,0 }, {1,1 }, {1,2 } }}, // 6 {2,3} + + {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 7 {2,3,1} + {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 8 {1,2,3} + {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 9 {3,1,2} + {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, //10 {3,2,4} + + {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, //11 {2,3,4,1} + {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //12 {1,2,3,4} + {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //13 {3,1,2,4} + {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //14 {3,2,4,5} + }}; + + static constexpr inline auto indexf = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 3, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} + {{0, 3, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} + {{0, 3, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} + {{0, 4, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} + {{0,16, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} + {{0,15, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} + {{0,15, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} + {{0,16, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} + {{0,88,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} + }}; + + static constexpr inline auto indexl = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 4, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} + {{0, 4, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} + {{0, 4, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} + {{0, 3, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} + {{0,14, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} + {{0,18, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} + {{0,18, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} + {{0,14, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} + {{0,73,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} + }}; + + template + constexpr 
inline auto prodn(extents_type const& n) + { + return std::accumulate(boost::numeric::ublas::begin(n),boost::numeric::ublas::end(n),1ul, std::multiplies<>{}); + } + + // static constexpr inline auto const& e = shapes; + // static constexpr inline auto const& i = multi_indices; + + + // template struct x { static inline constexpr auto value = e[k][r]*x::value; }; + // template struct x { static inline constexpr auto value = 1; }; + // template struct x { static inline constexpr auto value = 1*x::value; }; + + // template struct y { static inline constexpr auto value = e[k][r ]*y::value; }; + // template struct y { static inline constexpr auto value = 1*y::value; }; + // template struct y { static inline constexpr auto value = e[k][p-1]; }; + + + // template static inline constexpr auto wf = x::value; + // template static inline constexpr auto wl = y::value; + + // template struct zf { static inline constexpr auto value = i[k][kk][r]*wf + zf::value; }; + // template struct zf<0,k,kk> { static inline constexpr auto value = i[k][kk][0]*wf; }; + + // template static inline constexpr auto c2 = zf<2,k,kk>::value; + // template static inline constexpr auto c3 = zf<3,k,kk>::value; + // template static inline constexpr auto c4 = zf<4,k,kk>::value; @@ -173,70 +174,70 @@ struct fixture { BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index, layout_t, layout_types, fixture ) { - namespace ub = boost::numeric::ublas; - namespace mp = boost::mp11; - using strides_t = ub::basic_strides; - - - constexpr auto is_first_order = std::is_same_v; - constexpr auto const& index = is_first_order ? indexf : indexl; - - mp::mp_for_each>( [&]( auto I ) { - auto const& n = std::get(shapes); - auto const& i = std::get(multi_index); - auto const& jref = std::get(index); - mp::mp_for_each>( [&]( auto K ) { - auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); - BOOST_CHECK(j < prodn(n)); - BOOST_CHECK_EQUAL(j,jref[K]); - }); + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& w = ub::to_strides(n,layout_t{}); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,w.begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_t, layout_types, fixture ) { - namespace ub = boost::numeric::ublas; - namespace mp = boost::mp11; - using strides_t = ub::basic_strides; - - constexpr auto is_first_order = std::is_same_v; - constexpr auto const& index = is_first_order ? indexf : indexl; - - mp::mp_for_each>( [&]( auto I ) { - auto const& n = std::get(shapes); - auto const& i = std::get(multi_index); - auto const& jref = std::get(index); - constexpr auto r = std::get(ranks); - mp::mp_for_each>( [&]( auto K ) { - auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); - BOOST_CHECK(j < prodn(n)); - BOOST_CHECK_EQUAL(j,jref[K]); - }); + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& w = ub::to_strides(n,layout_t{}); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + constexpr auto r = std::get(ranks); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_types, fixture ) { - using namespace boost::numeric::ublas; - using strides_t = basic_strides; + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; - constexpr auto is_first_order = std::is_same_v; + constexpr auto is_first_order = std::is_same_v; constexpr auto const& index = is_first_order ? indexf : indexl; for(auto k = 0u; k < index.size(); ++k){ auto const& n = shapes[k]; + auto const& w = ub::to_strides(n,layout_t{}); auto const& iref = multi_index[k]; - auto const& w = strides_t(n); auto const& jref = index[k]; for(auto kk = 0u; kk < iref.size(); ++kk){ auto const jj = jref[kk]; auto const& ii = iref[kk]; auto i = multi_index_t(w.size()); - detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); // if constexpr ( is_first_order ) // detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); // else @@ -260,7 +261,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t { namespace ub = boost::numeric::ublas; namespace mp = boost::mp11; - using strides_t = ub::basic_strides; constexpr auto is_first_order = std::is_same_v; constexpr auto const& index = is_first_order ? indexf : indexl; @@ -270,7 +270,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t auto const& n = std::get(shapes); auto const& iref = std::get(multi_index); auto const& jref = std::get(index); - auto const& w = strides_t(n); + auto const& w = ub::to_strides(n,layout_t{}); constexpr auto r = std::get(ranks); mp::mp_for_each>( [&]( auto K ) { auto const jj = std::get(jref); @@ -287,22 +287,21 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_subtensor, layout_t, layout_types, fixture ) { - using namespace boost::numeric::ublas; - using strides_t = basic_strides; + namespace ub = boost::numeric::ublas; // subtensor the whole index-domain of a tensor - constexpr auto is_first_order = std::is_same_v; + constexpr auto is_first_order = std::is_same_v; constexpr auto const& index = is_first_order ? 
indexf : indexl; // subtensor the whole index-domain of a tensor for(auto k = 0u; k < index.size(); ++k){ auto const& n = shapes[k]; - auto const& w = strides_t(n); + auto const& w = ub::to_strides(n,layout_t{}); auto const& jref = index[k]; for(auto kk = 0u; kk < jref.size(); ++kk){ auto const jj = jref[kk]; - auto const j = detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); + auto const j = ub::detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); BOOST_CHECK_EQUAL ( j, jj ) ; } } diff --git a/test/tensor/test_strides.cpp b/test/tensor/test_strides.cpp index 2c45b6e12..f31a2d74f 100644 --- a/test/tensor/test_strides.cpp +++ b/test/tensor/test_strides.cpp @@ -112,6 +112,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_strides_static_rank_ctr, check(n1111, 4ul); check(n4231, 4ul); } + } BOOST_TEST_DECORATOR( diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index 24788ce6b..f3db7334d 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -14,60 +14,57 @@ #include #include "utility.hpp" -#include + +#include #include #include #include -BOOST_AUTO_TEST_SUITE ( subtensor_testsuite ) ; - -// double,std::complex - - +BOOST_AUTO_TEST_SUITE ( subtensor_testsuite ) -using test_types = zip>::with_t; +using test_types = zip>::with_t; struct fixture_shape { - using shape = boost::numeric::ublas::basic_extents; - - fixture_shape() : extents{ - shape{}, // 0 - shape{1,1}, // 1 - shape{1,2}, // 2 - shape{2,1}, // 3 - shape{2,3}, // 4 - shape{2,3,1}, // 5 - shape{4,1,3}, // 6 - shape{1,2,3}, // 7 - shape{4,2,3}, // 8 - shape{4,2,3,5} // 9 - } - {} - std::vector extents; + using shape = boost::numeric::ublas::extents<>; + + fixture_shape() : extents{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,1}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{4,1,3}, // 6 + shape{1,2,3}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,5} // 9 + } + {} + std::vector extents; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixture_shape ) { - namespace ub = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ub::dynamic_tensor; - using subtensor_type = ub::subtensor; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = ublas::subtensor; auto check = [](auto const& e) { - auto t = tensor_type{e}; + auto t = tensor_type(e); auto s = subtensor_type(t); BOOST_CHECK_EQUAL ( s.size() , t.size() ); BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK_EQUAL ( s.empty(), t.empty() ); BOOST_CHECK_EQUAL ( s. data(), t. 
data() ); } @@ -88,125 +85,125 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { namespace ub = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ub::dynamic_tensor; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::tensor_dynamic; using subtensor_type = ub::subtensor; using span = ub::sliced_span; - { - auto A = tensor_type{}; - auto Asub = subtensor_type( A ); + { + auto A = tensor_type{}; + auto Asub = subtensor_type( A ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); - BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); - BOOST_CHECK( Asub.data() == A.data() ); - } + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } - { - auto A = tensor_type{1,1}; - auto Asub = subtensor_type( A, 0, 0 ); + { + auto A = tensor_type{1,1}; + auto Asub = subtensor_type( A, 0, 0 ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); - BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); - BOOST_CHECK( Asub.data() == A.data() ); - } + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } - { - auto A = tensor_type{1,2}; + { + auto A = tensor_type{1,2}; auto Asub = subtensor_type( A, 0, span{} ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); - BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); - BOOST_CHECK( Asub.data() == A.data() ); - } - { - auto A = tensor_type{1,2}; - auto Asub = subtensor_type( A, 0, 1 ); + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + { + auto A = tensor_type{1,2}; + auto Asub = subtensor_type( A, 0, 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 1 ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1) ); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } - { - auto A = tensor_type{2,3}; - auto Asub = subtensor_type( A, 0, 1 ); - auto B = tensor_type(Asub.extents()); + { + auto A = tensor_type{2,3}; + auto Asub = subtensor_type( A, 0, 1 ); + auto B = tensor_type(Asub.getExtents()()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 
A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1) ); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } - { - auto A = tensor_type{4,3}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::end) ); - auto B = tensor_type(Asub.extents()); + { + auto A = tensor_type{4,3}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::max) ); + auto B = tensor_type(Asub.getExtents()()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1) ); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } - { - auto A = tensor_type{4,3,5}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::end), span(2,4) ); + { + auto A = tensor_type{4,3,5}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::max), span(2,4) ); - auto B = tensor_type(Asub.extents()); + auto B = tensor_type(Asub.getExtents()()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(2) , 3 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(2) , 3 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.strides().at(2), B.strides().at(2) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(2), 
B.strides().at(2) ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1)+ - Asub.spans().at(2).first()*A.strides().at(2)); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1)+ + Asub.spans().at(2).first()*A.strides().at(2)); + } } @@ -214,17 +211,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, fixture_shape ) { - namespace ub = boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ub::dynamic_tensor; - using subtensor_type = ub::subtensor; - using span = ub::sliced_span; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = ublas::subtensor; + // using span = ub::sliced_span; - auto check = [](auto const& e) - { + auto check = [](auto const& e) + { auto A = tensor_type{e}; value_type i{}; @@ -237,12 +234,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); BOOST_CHECK( Bsub.span_strides() == A.strides() ); BOOST_CHECK( Bsub.strides() == A.strides() ); - BOOST_CHECK( Bsub.extents() == A.extents() ); + BOOST_CHECK( Bsub.getExtents() == A.extents() ); BOOST_CHECK( Bsub.data() == A.data() ); BOOST_CHECK_EQUAL ( Bsub.size() , A.size() ); @@ -250,22 +247,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( Bsub.empty() ); BOOST_CHECK_EQUAL ( Bsub.data() , nullptr); - } - else{ + } + else{ BOOST_CHECK ( !Bsub.empty() ); BOOST_CHECK_NE ( Bsub.data() , nullptr); - } + } for(auto i = 0ul; i < Asub.size(); ++i) BOOST_CHECK_EQUAL( Asub[i], Bsub[i] ); - }; + }; - for(auto const& e : extents) - check(e); + for(auto const& e : extents) + check(e); } @@ -592,4 +589,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty #endif -BOOST_AUTO_TEST_SUITE_END(); +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index 2c4cace00..b96fb5b57 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -15,14 +15,13 @@ #include "utility.hpp" #include -#include -#include +#include #include #include -BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) ; +BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) @@ -36,8 +35,8 @@ struct fixture_sliced_span { span_type(0,2), // 2, a(0:2) span_type(1,1), // 3, a(1:1) span_type(1,3), // 4, a(1:3) - span_type(1,boost::numeric::ublas::end), // 5, a(1:end) - span_type(boost::numeric::ublas::end) // 6, a(end) + span_type(1,boost::numeric::ublas::max), // 5, a(1:end) + span_type(boost::numeric::ublas::max) // 6, a(end) } {} std::vector spans; @@ -47,7 +46,7 @@ struct fixture_sliced_span { BOOST_FIXTURE_TEST_CASE( transform_sliced_span_test, fixture_sliced_span ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; // template BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::sliced_span(0,1) ); @@ 
-91,8 +90,8 @@ struct fixture_strided_span { span_type(0,2,2), // 2, a(0:2:2) span_type(1,1,1), // 3, a(1:1:1) span_type(1,1,3), // 4, a(1:1:3) - span_type(1,2,boost::numeric::ublas::end), // 5, a(1:2:end) - span_type(boost::numeric::ublas::end) // 6, a(end) + span_type(1,2,boost::numeric::ublas::max), // 5, a(1:2:end) + span_type(boost::numeric::ublas::max) // 6, a(end) } {} std::vector spans; @@ -140,7 +139,7 @@ BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) struct fixture_shape { - using shape = boost::numeric::ublas::basic_extents; + using shape = boost::numeric::ublas::extents<>; fixture_shape() : extents{ shape{}, // 0 @@ -160,12 +159,12 @@ struct fixture_shape { BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) { - using namespace boost::numeric::ublas; - using span = sliced_span; + namespace ublas = boost::numeric::ublas; + using span = ublas::sliced_span; // shape{} { - auto v = detail::generate_span_array(extents[0]); + auto v = ublas::detail::generate_span_array(extents[0]); auto r = std::vector{}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } @@ -173,67 +172,67 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) // shape{1,1} { - auto v = detail::generate_span_array(extents[1],span(),span()); + auto v = ublas::detail::generate_span_array(extents[1],span(),span()); auto r = std::vector{span(0,0),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,1} - { - auto v = detail::generate_span_array(extents[1],end,span(end)); + { + auto v = ublas::detail::generate_span_array(extents[1],ublas::max,span(ublas::max)); auto r = std::vector{span(0,0),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,1} - { - auto v = detail::generate_span_array(extents[1],0,end); + { + auto v = ublas::detail::generate_span_array(extents[1],0,ublas::max); auto r = std::vector{span(0,0),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,2} - { - auto v = detail::generate_span_array(extents[2],0,end); + { + auto v = ublas::detail::generate_span_array(extents[2],0,ublas::max); auto r = std::vector{span(0,0),span(1,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,2} { - auto v = detail::generate_span_array(extents[2],0,1); + auto v = ublas::detail::generate_span_array(extents[2],0,1); auto r = std::vector{span(0,0),span(1,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { - auto v = detail::generate_span_array(extents[2],span(),span()); + auto v = ublas::detail::generate_span_array(extents[2],span(),span()); auto r = std::vector{span(0,0),span(0,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{2,3} { - auto v = detail::generate_span_array(extents[4],span(),span()); + auto v = ublas::detail::generate_span_array(extents[4],span(),span()); auto r = std::vector{span(0,1),span(0,2)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { - auto v = detail::generate_span_array(extents[4],1,span(1,end)); + auto v = 
ublas::detail::generate_span_array(extents[4],1,span(1,ublas::max)); auto r = std::vector{span(1,1),span(1,2)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{2,3,1} { - auto v = detail::generate_span_array(extents[5],span(),span(),0); + auto v = ublas::detail::generate_span_array(extents[5],span(),span(),0); auto r = std::vector{span(0,1),span(0,2),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { - auto v = detail::generate_span_array(extents[5],1,span(),end); + auto v = ublas::detail::generate_span_array(extents[5],1,span(),ublas::max); auto r = std::vector{span(1,1),span(0,2),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } @@ -242,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) struct fixture_span_vector_shape { - using shape = boost::numeric::ublas::basic_extents; + using shape = boost::numeric::ublas::extents<>; using span = boost::numeric::ublas::sliced_span; @@ -292,57 +291,57 @@ struct fixture_span_vector_shape { BOOST_FIXTURE_TEST_CASE( extents_test, fixture_span_vector_shape ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; - BOOST_CHECK ( std::equal( std::get<0>(reference_).begin(), std::get<0>(reference_).end(), ublas::detail::compute_extents( std::get<0>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<1>(reference_).begin(), std::get<1>(reference_).end(), ublas::detail::compute_extents( std::get<1>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<2>(reference_).begin(), std::get<2>(reference_).end(), ublas::detail::compute_extents( std::get<2>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<3>(reference_).begin(), std::get<3>(reference_).end(), ublas::detail::compute_extents( std::get<3>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<4>(reference_).begin(), std::get<4>(reference_).end(), ublas::detail::compute_extents( std::get<4>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<5>(reference_).begin(), std::get<5>(reference_).end(), ublas::detail::compute_extents( std::get<5>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<0>(reference_)), ublas::begin(std::get<0>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<0>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<1>(reference_)), ublas::begin(std::get<1>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<1>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<2>(reference_)), ublas::begin(std::get<2>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<2>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<3>(reference_)), ublas::begin(std::get<3>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<3>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<4>(reference_)), ublas::begin(std::get<4>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<4>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<5>(reference_)), ublas::begin(std::get<5>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<5>(span_vectors_) ) ) ) ); } -using test_types = std::tuple; +using test_types = std::tuple; 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_vector_shape ) { - using namespace boost::numeric; - using strides = ublas::basic_strides; + namespace ublas = boost::numeric::ublas; + { auto s = std::get<0>(span_vectors_); - auto w = strides( std::get<0>(extents_) ); + auto w = ublas::to_strides( std::get<0>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<1>(span_vectors_); - auto w = strides( std::get<1>(extents_) ); + auto w = ublas::to_strides( std::get<1>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<2>(span_vectors_); - auto w = strides( std::get<2>(extents_) ); + auto w = ublas::to_strides( std::get<2>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<3>(span_vectors_); - auto w = strides( std::get<3>(extents_) ); + auto w = ublas::to_strides( std::get<3>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] ); } { auto s = std::get<4>(span_vectors_); - auto w = strides( std::get<4>(extents_) ); + auto w = ublas::to_strides( std::get<4>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] ); } @@ -350,7 +349,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ { auto s = std::get<5>(span_vectors_); - auto w = ( std::get<5>(extents_) ); + auto w = ublas::to_strides( std::get<5>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] + s[3].first()*w[3] ); } @@ -391,4 +390,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( span_strides_test, layout, test_types, fixture #endif -BOOST_AUTO_TEST_SUITE_END(); +BOOST_AUTO_TEST_SUITE_END() From 0df6293cf19ef1ba18331df9e00e93ebcf2a2bb1 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Fri, 4 Jun 2021 21:23:03 +0200 Subject: [PATCH 6/9] improve product function for tensor-times-vector. 
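This patch replaces the single call into the recursive ttv kernel with a dispatch on the shape of the first argument: prod(tensor, vector, m) now routes scalar-shaped tensors to detail::scalar_scalar_prod, vector-shaped ones to detail::vector_vector_prod, matrix-shaped ones to detail::matrix_vector_prod (which calls the reworked mtv kernel), and only genuine higher-order tensors to detail::tensor_vector_prod and ublas::ttv. The standalone sketch below only illustrates that case analysis; it is not library code, and its is_scalar/is_vector/is_matrix helpers are simplified stand-ins for the ublas extents predicates.

    // Sketch of the intended kernel selection in prod(tensor, vector, m).
    // Not part of uBLAS; the shape predicates are deliberately simplified.
    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    using shape = std::vector<std::size_t>;

    // true if every extent from position k onwards equals 1
    bool all_one_from(shape const& n, std::size_t k) {
      return std::all_of(n.begin() + std::ptrdiff_t(k), n.end(),
                         [](std::size_t e){ return e == 1u; });
    }
    bool is_scalar(shape const& n) { return !n.empty() && all_one_from(n, 0); }
    bool is_vector(shape const& n) { return n.size() >= 2 && (n[0] > 1u) != (n[1] > 1u) && all_one_from(n, 2); }
    bool is_matrix(shape const& n) { return n.size() >= 2 && n[0] > 1u && n[1] > 1u && all_one_from(n, 2); }

    // which helper the reworked prod(ttv) would pick for a tensor with extents na
    const char* ttv_case(shape const& na) {
      if (is_scalar(na)) return "scalar_scalar_prod";  // result is a scalar tensor
      if (is_vector(na)) return "vector_vector_prod";  // inner product or scaling
      if (is_matrix(na)) return "matrix_vector_prod";  // uses the mtv kernel
      return "tensor_vector_prod";                     // general recursive ttv
    }

    int main() {
      std::cout << ttv_case({1,1})   << '\n'   // scalar_scalar_prod
                << ttv_case({4,1})   << '\n'   // vector_vector_prod
                << ttv_case({4,3})   << '\n'   // matrix_vector_prod
                << ttv_case({4,3,2}) << '\n';  // tensor_vector_prod
    }

Handling the degenerate ranks up front is what allows ublas::ttv to assume m != 0 and p >= 2, which is why its runtime length checks are turned into assertions further down in this patch.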
--- .../tensor/extents/extents_functions.hpp | 31 +++ .../tensor/function/tensor_times_vector.hpp | 258 +++++++++++++++--- .../numeric/ublas/tensor/multiplication.hpp | 102 +++---- .../boost/numeric/ublas/tensor/subtensor.hpp | 6 +- .../ublas/tensor/subtensor_utility.hpp | 8 +- .../tensor/tensor/tensor_static_rank.hpp | 15 + test/tensor/test_access.cpp | 52 ++-- test/tensor/test_subtensor.cpp | 4 +- test/tensor/test_subtensor_utility.cpp | 24 +- 9 files changed, 359 insertions(+), 141 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp index 149e12bde..cfa247b4d 100644 --- a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -77,6 +77,37 @@ template // std::all_of(cbegin(e)+2,cend(e) , [](auto a){return a==1UL;}); } +/** @brief Returns true if extents equals (m,[1,1,...,1]) with m>=1 */ +template +[[nodiscard]] inline constexpr bool is_row_vector(extents_base const& e) +{ + if (empty(e) || size(e) == 1 ) {return false;} + + if(cbegin(e)[0] == 1ul && + cbegin(e)[1] > 1ul && + std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;})){ + return true; + } + + return false; +} + + +/** @brief Returns true if extents equals (m,[1,1,...,1]) with m>=1 */ +template +[[nodiscard]] inline constexpr bool is_col_vector(extents_base const& e) +{ + if (empty(e) || size(e) == 1 ) {return false;} + + if(cbegin(e)[0] > 1ul && + cbegin(e)[1] == 1ul && + std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;})){ + return true; + } + + return false; +} + /** @brief Returns true if (m,[n,1,...,1]) with m>=1 or n>=1 */ template [[nodiscard]] inline constexpr bool is_matrix(extents_base const& e) diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp index 5702f97fb..611c5a82e 100644 --- a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp @@ -15,6 +15,7 @@ #include #include +#include "../multiplication.hpp" #include "../extents.hpp" #include "../type_traits.hpp" #include "../tags.hpp" @@ -49,6 +50,190 @@ using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t +inline auto scalar_scalar_prod(TA const &a, V const &b, EC const& nc_base) +{ + assert(ublas::is_scalar(a.extents())); + using tensor = TC; + using value = typename tensor::value_type; + using shape = typename tensor::extents_type; + return tensor(shape(nc_base),value(a[0]*b(0))); +} + +template +inline auto vector_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m) +{ + auto const& na = a.extents(); + + assert( ublas::is_vector(na)); + assert(!ublas::is_scalar(na)); + assert( ublas::size(na) > 1u); + assert(m > 0); + + using tensor = TC; + using value = typename tensor::value_type; + using shape = typename tensor::extents_type; + + auto const n1 = na[0]; + auto const n2 = na[1]; + auto const s = b.size(); + + // general + // [n1 n2 1 ... 1] xj [s 1] for any 1 <= j <= p with n1==1 or n2==1 + + + // [n1 1 1 ... 1] x1 [n1 1] -> [1 1 1 ... 1] + // [1 n2 1 ... 1] x2 [n2 1] -> [1 1 1 ... 
1] + + + assert(n1>1 || n2>1); + + if( (n1>1u && m==1u) || (n2>1u && m==2u) ){ + if(m==1u) assert(n2==1u && n1==s); + if(m==2u) assert(n1==1u && n2==s); + auto cc = std::inner_product( a.begin(), a.end(), b.begin(), value(0) ); + return tensor(shape(nc_base),value(cc)); + } + + // [n1 1 1 ... 1] xj [1 1] -> [n1 1 1 ... 1] with j != 1 + // [1 n2 1 ... 1] xj [1 1] -> [1 n2 1 ... 1] with j != 2 + +//if( (n1>1u && m!=1u) && (n2>0u && m!=2u) ){ + + if(n1>1u) assert(m!=1u); + if(n2>1u) assert(m!=2u); + assert(s==1u); + + if(n1>1u) assert(n2==1u); + if(n2>1u) assert(n1==1u); + + if(n1>1u) nc_base[0] = n1; + if(n2>1u) nc_base[1] = n2; + + auto bb = b(0); + auto c = tensor(shape(nc_base)); + std::transform(a.begin(),a.end(),c.begin(),[bb](auto aa){ return aa*bb; }); + return c; +//} + + +} + + +/** Computes a matrix-vector product. + * + * + * @note assume stride 1 for specific dimensions and therefore requires refactoring for subtensor + * +*/ +template +inline auto matrix_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m) +{ + auto const& na = a.extents(); + + assert( ublas::is_matrix(na)); + assert(!ublas::is_vector(na)); + assert(!ublas::is_scalar(na)); + assert( ublas::size(na) > 1u); + assert(m > 0); + + using tensor = TC; + using shape = typename tensor::extents_type; + using size_t = typename shape::value_type; + + auto const n1 = na[0]; + auto const n2 = na[1]; + auto const s = b.size(); + + // general + // [n1 n2 1 ... 1] xj [s 1] for any 1 <= j <= p with either n1>1 and n2>1 + + + // if [n1 n2 1 ... 1] xj [1 1] -> [n1 n2 1 ... 1] for j > 2 + if(m > 2){ + nc_base[0] = n1; + nc_base[1] = n2; + assert(s == 1); + auto c = tensor(shape(nc_base)); + auto const bb = b(0); + std::transform(a.begin(),a.end(), c.begin(), [bb](auto aa){return aa*bb;}); + return c; + } + + + // [n1 n2 1 ... 1] x1 [n1 1] -> [n2 1 ... 1] -> vector-times-matrix + // [n1 n2 1 ... 1] x2 [n2 1] -> [n1 1 ... 1] -> matrix-times-vector + + nc_base[0] = m==1 ? n2 : n1; + + auto c = tensor(shape(nc_base)); + auto const& wa = a.strides(); + auto const* bdata = &(b(0)); + + detail::recursive::mtv(m-1,n1,n2, c.data(), size_t(1), a.data(), wa[0], wa[1], bdata, size_t(1)); + + return c; +} + + + +template +inline auto tensor_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m) +{ + auto const& na = a.extents(); + + assert( ublas::is_tensor(na)); + assert( ublas::size(na) > 1u); + assert(m > 0); + + using tensor = TC; + using shape = typename tensor::extents_type; + using layout = typename tensor::layout_type; + + auto const pa = a.rank(); + auto const nm = na[m-1]; + auto const s = b.size(); + + auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)}; + auto wb = ublas::to_strides(nb,layout{} ); + + //TODO: Include an outer product when legacy vector becomes a new vector. + + for (auto i = 0ul, j = 0ul; i < pa; ++i) + if (i != m - 1) + nc_base[j++] = na.at(i); + + auto c = tensor(shape(nc_base)); + + // [n1 n2 ... nm ... np] xm [1 1] -> [n1 n2 ... nm-1 nm+1 ... np] + + if(s == 0){ + assert(nm == 1); + auto const bb = b(0); + std::transform(a.begin(),a.end(), c.begin(), [bb](auto aa){return aa*bb;}); + return c; + } + + + // if [n1 n2 n3 ... np] xm [nm 1] -> [n1 n2 ... nm-1 nm+1 ... 
np] + + auto const& nc = c.extents(); + auto const& wc = c.strides(); + auto const& wa = a.strides(); + auto const* bp = &(b(0)); + + ttv(m, pa, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + bp, nb.data(), wb.data()); + + return c; +} + +}//namespace detail + + /** @brief Computes the m-mode tensor-times-vector product * * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] @@ -63,45 +248,49 @@ using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t::value, detail::enable_ttv_if_extent_has_dynamic_rank = true > -inline decltype(auto) prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) +inline auto prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) { using tensor = tensor_core< TE >; using shape = typename tensor::extents_type; - using value = typename tensor::value_type; - using layout = typename tensor::layout_type; using resize_tag = typename tensor::resizable_tag; - auto const p = a.rank(); + auto const pa = a.rank(); static_assert(std::is_same_v); static_assert(is_dynamic_v); if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero."); - if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode."); + if (pa < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode."); if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); auto const& na = a.extents(); - auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)}; - auto wb = ublas::to_strides(nb,layout{} ); + + if(b.size() != na[m-1]) throw std::length_error("error in boost::numeric::ublas::prod(ttv): dimension mismatch of tensor and vector."); auto const sz = std::max( std::size_t(ublas::size(na)-1u), std::size_t(2) ); auto nc_base = typename shape::base_type(sz,1); - for (auto i = 0ul, j = 0ul; i < p; ++i) - if (i != m - 1) - nc_base[j++] = na.at(i); + // output scalar tensor + if(ublas::is_scalar(na)){ + return detail::scalar_scalar_prod(a,b,nc_base); + } + + // output scalar tensor or vector tensor + if (ublas::is_vector(na)){ + return detail::vector_vector_prod(a,b,nc_base,m); + } + + // output scalar tensor or vector tensor + if (ublas::is_matrix(na)){ + return detail::matrix_vector_prod(a,b,nc_base,m); + } + + assert(ublas::is_tensor(na)); + return detail::tensor_vector_prod(a,b,nc_base,m); - auto nc = shape(nc_base); - auto c = tensor( nc, value{} ); - auto const* bb = &(b(0)); - ttv(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), wb.data()); - return c; } @@ -143,7 +332,6 @@ inline auto prod( tensor_core< TE > const &a, vector const &b, const std:: constexpr auto p = std::tuple_size_v; constexpr auto sz = std::max(std::size_t(std::tuple_size_v-1U),std::size_t(2)); - using shape_b = ublas::extents<2>; using shape_c = ublas::extents; using tensor_c = tensor_core>; @@ -158,21 +346,25 @@ inline auto prod( tensor_core< TE > const &a, vector const &b, const std:: auto nc_base = typename shape_c::base_type{}; std::fill(nc_base.begin(), nc_base.end(),std::size_t(1)); - for (auto i = 0ul, j = 0ul; i < p; ++i) - if (i != m - 1) - 
nc_base[j++] = na.at(i); - auto nc = shape_c(std::move(nc_base)); - auto nb = shape_b{b.size(),1UL}; - auto wb = ublas::to_strides(nb,layout{}); - auto c = tensor_c( std::move(nc) ); - auto const* bb = &(b(0)); - ttv(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), wb.data() ); - return c; + // output scalar tensor + if(ublas::is_scalar(na)){ + return detail::scalar_scalar_prod(a,b,nc_base); + } + + // output scalar tensor or vector tensor + if (ublas::is_vector(na)){ + return detail::vector_vector_prod(a,b,nc_base,m); + } + + // output scalar tensor or vector tensor + if (ublas::is_matrix(na)){ + return detail::matrix_vector_prod(a,b,nc_base,m); + } + + assert(ublas::is_tensor(na)); + return detail::tensor_vector_prod(a,b,nc_base,m); } @@ -201,7 +393,7 @@ inline auto prod( tensor_core< TE > const &a, vector const &b) using shape = typename tensor::extents; using layout = typename tensor::layout; using shape_b = extents<2>; - using shape_c = remove_element_t; + using shape_c = remove_element_t; // this is wrong using container_c = rebind_storage_size_t; using tensor_c = tensor_core>; diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index e2d94f7be..ea7901814 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -337,33 +337,44 @@ void ttv0(SizeType const r, /** @brief Computes the matrix-times-vector product * - * Implements C[i1] = sum(A[i1,i2] * b[i2]) or C[i2] = sum(A[i1,i2] * b[i1]) - * - * @note is used in function ttv - * - * @param[in] m zero-based contraction mode with m=0 or m=1 - * @param[out] c pointer to the output tensor C - * @param[in] nc pointer to the extents of tensor C - * @param[in] wc pointer to the strides of tensor C - * @param[in] a pointer to the first input tensor A - * @param[in] na pointer to the extents of input tensor A - * @param[in] wa pointer to the strides of input tensor A - * @param[in] b pointer to the second input tensor B + * Implements C[i1] = sum(A[i1,i2] * B[i2]) if k = 1 or C[i2] = sum(A[i1,i2] * B[i1]) if k = 0 + * + * [m,n] = size(A(..,:,..,:,..)) + * [m] = size(C(..,:,..)) + * [n] = size(B(..,:,..)) + * + * + * @param[in] k if k = 0 + * @param[in] m number of rows of A + * @param[in] n number of columns of A + * @param[out] c pointer to C + * @param[in] wc m-th (k=1) or n-th (k=0) stride for C + * @param[in] a pointer to A + * @param[in] wa_m m-th stride for A + * @param[in] wa_n n-th stride for A + * @param[in] b pointer to B + * @param[in] wb n-th (k=1) or m-th (k=0) stride for B */ template -void mtv(SizeType const m, - PointerOut c, SizeType const*const /*unsed*/, SizeType const*const wc, - PointerIn1 a, SizeType const*const na , SizeType const*const wa, - PointerIn2 b) +void mtv(SizeType const k, + SizeType const m, + SizeType const n, + PointerOut c, SizeType const wc, + PointerIn1 a, SizeType const wa_m, SizeType const wa_n, + PointerIn2 b, SizeType const wb) { - // decides whether matrix multiplied with vector or vector multiplied with matrix - const auto o = (m == 0) ? 1 : 0; - for(auto io = 0u; io < na[o]; c += wc[o], a += wa[o], ++io) { + auto const wa_x = k==0 ? wa_n : wa_m; + auto const wa_y = k==0 ? wa_m : wa_n; + + auto const x = k==0 ? n : m; + auto const y = k==0 ? 
m : n; + + for(auto ix = 0u; ix < x; c += wc, a += wa_x, ++ix) { auto c1 = c; auto a1 = a; auto b1 = b; - for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) { + for(auto iy = 0u; iy < y; a1 += wa_y, b1 += wb, ++iy) { *c1 += *a1 * *b1; } } @@ -603,10 +614,12 @@ namespace boost::numeric::ublas { * C[i1,i2,...,im-1,im+1,...,ip] = sum(A[i1,i2,...,im,...,ip] * b[im]) for m>1 and * C[i2,...,ip] = sum(A[i1,...,ip] * b[i1]) for m=1 * - * @note calls detail::ttv, detail::ttv0 or detail::mtv + * @note calls detail::ttv, detail::ttv0 for p == 1 or p == 2 use ublas::inner or ublas::mtv or ublas::vtm + * + * * * @param[in] m contraction mode with 0 < m <= p - * @param[in] p number of dimensions (rank) of the first input tensor with p > 0 + * @param[in] p number of dimensions (rank) of the first input tensor with p > 2 * @param[out] c pointer to the output tensor with rank p-1 * @param[in] nc pointer to the extents of tensor c * @param[in] wc pointer to the strides of tensor c @@ -621,50 +634,25 @@ template void ttv(SizeType const m, SizeType const p, PointerOut c, SizeType const*const nc, SizeType const*const wc, const PointerIn1 a, SizeType const*const na, SizeType const*const wa, - const PointerIn2 b, SizeType const*const nb, SizeType const*const wb) + const PointerIn2 b, SizeType const*const nb, SizeType const*const /*unused*/) { - static_assert( std::is_pointer::value && std::is_pointer::value & std::is_pointer::value, + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not pointer types."); - if( m == 0){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero."); - } + assert(m != 0); + assert(p >= m); + assert(p >= 2); - if( p < m ){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus."); - } - if( p == 0){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero."); - } - if(c == nullptr || a == nullptr || b == nullptr){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers."); - } for(auto i = 0u; i < m-1; ++i){ - if(na[i] != nc[i]){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - } - } - - const auto max = std::max(nb[0], nb[1]); - if( na[m-1] != max){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); + assert(na[i] == nc[i]); } + assert(na[m-1] == std::max(nb[0], nb[1])); - if((m != 1) && (p > 2)){ - detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b); - } - else if ((m == 1) && (p > 2)){ + if(m == 1) detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b); - } - else if( p == 2 ){ - detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b); - } - else /*if( p == 1 )*/{ - auto v = std::remove_pointer_t>{}; - *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v); - } - + else + detail::recursive::ttv (m-1, p-1, p-2, c, nc, wc, a, na, wa, b); } diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp index e8a150d16..a119300b7 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -129,10 +129,10 @@ class subtensor > subtensor(tensor_type& t, span_types&& ... 
spans) : super_type () , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) - , extents_ (detail::compute_extents(spans_)) + , extents_ (detail::to_extents(spans_)) , strides_ (ublas::to_strides(extents_,layout_type{})) - , span_strides_ (detail::compute_span_strides(t.strides(),spans_)) - , data_ {t.data() + detail::compute_offset(t.strides(), spans_)} + , span_strides_ (detail::to_span_strides(t.strides(),spans_)) + , data_ {t.data() + detail::to_offset(t.strides(), spans_)} { // if( m == nullptr) // throw std::length_error("Error in tensor_view::tensor_view : multi_array_type is nullptr."); diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 4c38be404..4b203c138 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -37,10 +37,10 @@ namespace boost::numeric::ublas::detail { * @param[in] spans vector of spans of the subtensor */ template -auto compute_span_strides(std::vector const& strides, Spans const& spans) +auto to_span_strides(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) - throw std::runtime_error("Error in boost::numeric::ublas::subtensor::compute_span_strides(): tensor strides.size() != spans.size()"); + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::to_span_strides(): tensor strides.size() != spans.size()"); auto span_strides = std::vector(spans.size()); @@ -60,7 +60,7 @@ auto compute_span_strides(std::vector const& strides, Spans const& sp * @param[in] spans vector of spans of the subtensor */ template -auto compute_offset(std::vector const& strides, Spans const& spans) +auto to_offset(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); @@ -77,7 +77,7 @@ auto compute_offset(std::vector const& strides, Spans const& spans) * @param[in] spans vector of spans of the subtensor */ template -auto compute_extents(spans_type const& spans) +auto to_extents(spans_type const& spans) { using extents_t = extents<>; using base_type = typename extents_t::base_type; diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index fbb5074db..3a9205480 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -111,6 +111,21 @@ template { } + /** @brief Constructs a tensor_core with a \c shape and initial value + * + * @code auto t = tensor(extents<>{4,3,2},5); @endcode + * + * @param i initial tensor_core with this value + */ + inline tensor_core (extents_type e, value_type i) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(to_strides(_extents,layout_type{})) + , _container(product(_extents)) + { + std::fill(begin(),end(),i); + } + /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data * * @code auto t = tensor(extents<>{3,4,2},std::vector(3*4*2,1.f)); @endcode diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index dd0b08607..35fcd5938 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -209,8 +209,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_ auto const& w = 
ub::to_strides(n,layout_t{}); auto const& i = std::get(multi_index); auto const& jref = std::get(index); - constexpr auto r = std::get(ranks); mp::mp_for_each>( [&]( auto K ) { + constexpr auto r = std::get(ranks); auto const& ii = std::get(i); auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin()); BOOST_CHECK(j < prodn(n)); @@ -226,35 +226,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_ty namespace mp = boost::mp11; constexpr auto is_first_order = std::is_same_v; - constexpr auto const& index = is_first_order ? indexf : indexl; - - for(auto k = 0u; k < index.size(); ++k){ - auto const& n = shapes[k]; - auto const& w = ub::to_strides(n,layout_t{}); - auto const& iref = multi_index[k]; - auto const& jref = index[k]; - for(auto kk = 0u; kk < iref.size(); ++kk){ - auto const jj = jref[kk]; - auto const& ii = iref[kk]; - auto i = multi_index_t(w.size()); - ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); -// if constexpr ( is_first_order ) -// detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); -// else -// detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); - -// std::cout << "j= " << jj << std::endl; -// std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << "ii_ref = [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << std::endl; - - + constexpr auto const& index = is_first_order ? indexf : indexl; - BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; - } + for(auto k = 0u; k < index.size(); ++k){ + auto const& n = shapes[k]; + auto const& w = ub::to_strides(n,layout_t{}); + auto const& iref = multi_index[k]; + auto const& jref = index[k]; + for(auto kk = 0u; kk < iref.size(); ++kk){ + auto const jj = jref[kk]; + auto const& ii = iref[kk]; + auto i = multi_index_t(w.size()); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); +// std::cout << "j= " << jj << std::endl; +// std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "ii_ref = [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << std::endl; + BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t, layout_types, fixture ) @@ -270,12 +262,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t auto const& n = std::get(shapes); auto const& iref = std::get(multi_index); auto const& jref = std::get(index); - auto const& w = ub::to_strides(n,layout_t{}); - constexpr auto r = std::get(ranks); + auto const& w = ub::to_strides(n,layout_t{}); mp::mp_for_each>( [&]( auto K ) { auto const jj = std::get(jref); auto const& ii = std::get(iref); auto i = multi_index_t(w.size()); + constexpr auto r = std::get(ranks); ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; }); diff 
--git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index f3db7334d..d091f4375 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -128,8 +128,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) auto A = tensor_type{1,2}; auto Asub = subtensor_type( A, 0, 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index b96fb5b57..c1fab9fa1 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -293,12 +293,12 @@ BOOST_FIXTURE_TEST_CASE( extents_test, fixture_span_vector_shape ) { namespace ublas = boost::numeric::ublas; - BOOST_CHECK ( std::equal( ublas::begin(std::get<0>(reference_)), ublas::begin(std::get<0>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<0>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<1>(reference_)), ublas::begin(std::get<1>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<1>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<2>(reference_)), ublas::begin(std::get<2>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<2>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<3>(reference_)), ublas::begin(std::get<3>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<3>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<4>(reference_)), ublas::begin(std::get<4>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<4>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<5>(reference_)), ublas::begin(std::get<5>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<5>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<0>(reference_)), ublas::begin(std::get<0>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<0>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<1>(reference_)), ublas::begin(std::get<1>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<1>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<2>(reference_)), ublas::begin(std::get<2>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<2>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<3>(reference_)), ublas::begin(std::get<3>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<3>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<4>(reference_)), ublas::begin(std::get<4>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<4>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<5>(reference_)), ublas::begin(std::get<5>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<5>(span_vectors_) ) ) ) ); } @@ -314,35 +314,35 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ { auto s = std::get<0>(span_vectors_); auto w = ublas::to_strides( std::get<0>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( 
o, 0 ); } { auto s = std::get<1>(span_vectors_); auto w = ublas::to_strides( std::get<1>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<2>(span_vectors_); auto w = ublas::to_strides( std::get<2>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<3>(span_vectors_); auto w = ublas::to_strides( std::get<3>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] ); } { auto s = std::get<4>(span_vectors_); auto w = ublas::to_strides( std::get<4>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] ); } @@ -350,7 +350,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ { auto s = std::get<5>(span_vectors_); auto w = ublas::to_strides( std::get<5>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] + s[3].first()*w[3] ); } From 949ffaac97a3b2bcb263c315e14caae85ef3d363 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Sun, 11 Dec 2022 17:19:31 +0100 Subject: [PATCH 7/9] remove difference between strides and slices. --- include/boost/numeric/ublas/tensor/span.hpp | 131 ++------ .../boost/numeric/ublas/tensor/subtensor.hpp | 64 ++-- .../ublas/tensor/subtensor_utility.hpp | 72 ++-- test/tensor/test_span.cpp | 29 +- test/tensor/test_subtensor.cpp | 16 +- test/tensor/test_subtensor_utility.cpp | 313 +++++++++--------- 6 files changed, 256 insertions(+), 369 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 581e16dc6..b18cb3552 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -21,13 +21,6 @@ #include "concepts.hpp" -namespace boost::numeric::ublas::tag{ - -struct sliced {}; -struct strided {}; - -} // namespace boost::numeric::ublas::tag - namespace boost::numeric::ublas { @@ -43,21 +36,17 @@ namespace boost::numeric::ublas { */ +//template +//class span; -//using offsets = std::vector; - -template -class span; - - -static constexpr inline std::size_t max = std::numeric_limits::max(); - -template<> -class span +template +class span { public: - using span_tag = tag::strided; - using value_type = std::size_t; + using value_type = unsigned_type; + + static constexpr inline value_type max = std::numeric_limits::max(); + // covers the complete range of one dimension // e.g. a(:) @@ -92,6 +81,13 @@ class span } } + // covers a linear range of one dimension + // e.g. a(1:n) + span(value_type f, value_type l) + : span(f,1,l) + { + } + // covers only one index of one dimension // e.g. 
a(1) or a(end) span(value_type n) @@ -142,105 +138,34 @@ class span value_type first_, last_ , step_, size_; }; -using strided_span = span; - -} // namespace - - -///////////// - -namespace boost::numeric::ublas { - -template<> -class span : - private span -{ - using super_type = span; -public: - using span_tag = tag::sliced; - using value_type = typename super_type::value_type; - constexpr explicit span() - : super_type() - { - } - - span(value_type f, value_type l) - : super_type(f, value_type(1), l ) - { - } - - span(value_type n) - : super_type(n) - { - } - - span(span const& other) - : super_type(other) - { - } - - inline span& operator=(const span &other) - { - super_type::operator=(other); - return *this; - } - - ~span() = default; - - inline value_type operator[] (std::size_t idx) const - { - return super_type::operator [](idx); - } - - inline auto first() const {return super_type::first(); } - inline auto last () const {return super_type::last (); } - inline auto step () const {return super_type::step (); } - inline auto size () const {return super_type::size (); } - - inline span operator()(const span &rhs) const - { - auto const& lhs = *this; - return span( rhs.first_ + lhs.first_, rhs.last_ + lhs.first_ ); - } -}; - -using sliced_span = span; - - -template -inline auto ran(unsigned_type_left f, unsigned_type_right l) -{ - return sliced_span(f,l); -} - -template -inline auto ran(unsigned_type_left f, unsigned_type_middle s, unsigned_type_right l) -{ - return strided_span(f,s,l); -} +using sspan = span; } // namespace -template -std::ostream& operator<< (std::ostream& out, boost::numeric::ublas::span const& s) +template +std::ostream& operator<< (std::ostream& out, boost::numeric::ublas::span const& s) { return out << "[" << s.first() << ":" << s.step() << ":" << s.last() << "]" << std::endl; } -template +template< + boost::numeric::ublas::integral unsigned_type_lhs, + boost::numeric::ublas::integral unsigned_type_rhs> inline bool operator==( - boost::numeric::ublas::span const& lhs, - boost::numeric::ublas::span const& rhs) + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) { return lhs.first() == rhs.first() && lhs.last() == rhs.last() && lhs.step() == rhs.step(); } -template +template< + boost::numeric::ublas::integral unsigned_type_lhs, + boost::numeric::ublas::integral unsigned_type_rhs> inline bool operator!=( - boost::numeric::ublas::span const& lhs, - boost::numeric::ublas::span const& rhs) + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) { return lhs.first() != rhs.first() || lhs.last() != rhs.last() || lhs.step() != rhs.step(); } diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp index a119300b7..cc469071f 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -29,11 +29,9 @@ namespace boost::numeric::ublas { /** @brief A view of a dense tensor of values of type \c T. * - * @tparam T type of the objects stored in the tensor (like int, double, complex,...) - * @tparam F - * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. \c and \c std::vector can also be used + * @tparam T tensor type */ -template +template class subtensor; @@ -48,22 +46,20 @@ class subtensor; * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. 
\c and \c std::vector can also be used */ template -class subtensor > +class subtensor > : public detail::tensor_expression< - subtensor> , - subtensor> > + subtensor> , + subtensor> > { static_assert( std::is_same::value || std::is_same::value, "boost::numeric::tensor template class only supports first- or last-order storage formats."); using tensor_type = tensor_dynamic; - using self_type = subtensor; + using self_type = subtensor; public: - using domain_tag = tag::sliced; - - using span_type = span; + using span_type = sspan; template using tensor_expression_type = detail::tensor_expression; @@ -116,23 +112,23 @@ class subtensor > */ BOOST_UBLAS_INLINE subtensor (tensor_type& t) - : super_type () - , spans_ () - , extents_ (t.extents()) - , strides_ (t.strides()) - , span_strides_ (t.strides()) - , data_ (t.data()) + : super_type () + , spans_ () + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_ (t.strides()) + , data_ (t.data()) { } template - subtensor(tensor_type& t, span_types&& ... spans) - : super_type () - , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) - , extents_ (detail::to_extents(spans_)) - , strides_ (ublas::to_strides(extents_,layout_type{})) - , span_strides_ (detail::to_span_strides(t.strides(),spans_)) - , data_ {t.data() + detail::to_offset(t.strides(), spans_)} + subtensor(tensor_type& t, span_types&& ... spans) + : super_type () + , spans_ (detail::generate_vector(t.extents(),std::forward(spans)...)) + , extents_ (detail::to_extents(spans_)) + , strides_ (ublas::to_strides(extents_,layout_type{})) + , span_strides_ (detail::to_span_strides(t.strides(),spans_)) + , data_ {t.data() + detail::to_offset(t.strides(), spans_)} { // if( m == nullptr) // throw std::length_error("Error in tensor_view::tensor_view : multi_array_type is nullptr."); @@ -145,16 +141,16 @@ class subtensor > * * @note is similar to a handle to a tensor */ - explicit - subtensor (tensor_type const& t) - : super_type () - , spans_ () - , extents_ (t.extents()) - , strides_ (t.strides()) - , span_strides_ (t.strides()) - , data_ (t.data()) - { - } + explicit + subtensor (tensor_type const& t) + : super_type () + , spans_ () + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_ (t.strides()) + , data_ (t.data()) + { + } diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 4b203c138..3204d393b 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -91,69 +91,57 @@ auto to_extents(spans_type const& spans) /*! @brief Auxiliary function for subtensor which possibly transforms a span instance * - * transform_span(span() ,4) -> span(0,3) - * transform_span(span(1,1) ,4) -> span(1,1) - * transform_span(span(1,3) ,4) -> span(1,3) - * transform_span(span(2,end),4) -> span(2,3) - * transform_span(span(end) ,4) -> span(3,3) + * transform(span() ,4) -> span(0,3) + * transform(span(1,1) ,4) -> span(1,1) + * transform(span(1,3) ,4) -> span(1,3) + * transform(span(2,end),4) -> span(2,3) + * transform(span(end) ,4) -> span(3,3) * * @note span is zero-based indexed. 
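 * For strided spans the step is preserved and end maps to the largest index reachable with that step, e.g. transform(span(1,2,end),7) -> span(1,2,5).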
* * @param[in] s span that is going to be transformed * @param[in] extent extent that is maybe used for the tranformation */ -template -auto transform_span(span const& s, std::size_t const extent) +template +auto transform(span const& s, size_type const extent) { - using span_type = span; + using span_type = span; - std::size_t first = s.first(); - std::size_t last = s.last (); - std::size_t size = s.size (); + std::size_t first = s.first(); + std::size_t last = s.last (); + std::size_t size = s.size (); - auto const extent0 = extent-1; + auto const extent0 = extent-size_type(1u); - auto constexpr is_sliced = std::is_same::value; - - - if constexpr ( is_sliced ){ - if(size == 0) return span_type(0 , extent0); - else if(first== max) return span_type(extent0 , extent0); - else if(last == max) return span_type(first , extent0); - else return span_type(first , last ); - } - else { size_type step = s.step (); - if(size == 0) return span_type(0 , size_type(1), extent0); - else if(first== max) return span_type(extent0 , step, extent0); - else if(last == max) return span_type(first , step, extent0); - else return span_type(first , step, last ); - } - return span_type{}; + if(size == 0) return span_type(0 , 1u, extent0); + else if(first== span_type::max) return span_type(extent0 , step, extent0); + else if(last == span_type::max) return span_type(first , step, extent0); + else return span_type(first , step, last ); } -template -void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); +template +void transform_impl (extents<> const& extents, std::array& span_array, size_type arg, Spans&& ... spans ); -template -void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) +template +void transform_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) { - std::get(span_array) = transform_span(s, extents[r]); + std::get(span_array) = transform(s, extents[r]); static constexpr auto nspans = sizeof...(spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans)...); + transform_impl(extents, span_array, std::forward(spans)...); } -template -void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) +template +void transform_impl (extents<> const& extents, std::array& span_array, size_type arg, Spans&& ... spans ) { static constexpr auto nspans = sizeof...(Spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - std::get(span_array) = transform_span(Span(arg), extents[r]); + std::get(span_array) = transform(Span(arg), extents[r]); if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans) ... ); + transform_impl(extents, span_array, std::forward(spans) ... ); } @@ -169,14 +157,14 @@ void transform_spans_impl (extents<> const& extents, std::array& span_ar * @param[in] spans spans with which the subtensor is created */ template -auto generate_span_array(extents<> const& extents, Spans&& ... spans) +auto generate_array(extents<> const& extents, Spans&& ... 
spans) { constexpr static auto n = sizeof...(Spans); if(extents.size() != n) throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); std::array span_array; if constexpr (n>0) - transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + transform_impl<0>( extents, span_array, std::forward(spans)... ); return span_array; } @@ -192,9 +180,9 @@ auto generate_span_array(extents<> const& extents, Spans&& ... spans) * @param[in] spans spans with which the subtensor is created */ template -auto generate_span_vector(extents<> const& extents, Spans&& ... spans) +auto generate_vector(extents<> const& extents, Spans&& ... spans) { - auto span_array = generate_span_array(extents,std::forward(spans)...); + auto span_array = generate_array(extents,std::forward(spans)...); return std::vector(span_array.begin(), span_array.end()); } diff --git a/test/tensor/test_span.cpp b/test/tensor/test_span.cpp index 1b1da2a63..6da21dcc8 100644 --- a/test/tensor/test_span.cpp +++ b/test/tensor/test_span.cpp @@ -17,7 +17,7 @@ BOOST_AUTO_TEST_SUITE( span_testsuite ); struct fixture { - using span_type = boost::numeric::ublas::strided_span; + using span_type = boost::numeric::ublas::sspan; fixture() : spans { @@ -39,7 +39,7 @@ struct fixture { BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) { - using span_type = boost::numeric::ublas::strided_span; + using span_type = typename fixture::span_type; BOOST_CHECK_EQUAL (spans[0].first(),0); BOOST_CHECK_EQUAL (spans[0].step (),0); @@ -96,8 +96,7 @@ BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) { - using span_type = boost::numeric::ublas::strided_span; - + using span_type = typename fixture::span_type; BOOST_CHECK_EQUAL (span_type(spans[0]).first(),0); BOOST_CHECK_EQUAL (span_type(spans[0]).step (),0); @@ -233,26 +232,4 @@ BOOST_FIXTURE_TEST_CASE( access_operator_test, fixture ) } -BOOST_FIXTURE_TEST_CASE( ran_test, fixture ) -{ - using namespace boost::numeric::ublas; - - BOOST_CHECK ( ( ran(0,0,0) == spans[0]) ); - - BOOST_CHECK ( ( ran(0,1,0) == spans[2]) ); - BOOST_CHECK ( ( ran(0, 0) == spans[2]) ); - - - BOOST_CHECK ( ( ran(0,1,2) == spans[3]) ); - BOOST_CHECK ( ( ran(0, 2) == spans[3]) ); - - BOOST_CHECK ( ( ran(1,1,2) == spans[4]) ); - BOOST_CHECK ( ( ran(1, 2) == spans[4]) ); - - BOOST_CHECK ( ( ran(0,2,4) == spans[5]) ); - BOOST_CHECK ( ( ran(1,2,4) == spans[6]) ); - BOOST_CHECK ( ( ran(1,3,5) == spans[7]) ); - BOOST_CHECK ( ( ran(1,3,7) == spans[8]) ); -} - BOOST_AUTO_TEST_SUITE_END(); diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index d091f4375..541709f7c 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -56,7 +56,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixt using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; + using subtensor_type = ublas::subtensor; auto check = [](auto const& e) { @@ -88,8 +88,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ub::tensor_dynamic; - using subtensor_type = ub::subtensor; - using span = ub::sliced_span; + using subtensor_type = ub::subtensor; + using span_type = typename 
subtensor_type::span_type; { @@ -117,7 +117,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { auto A = tensor_type{1,2}; - auto Asub = subtensor_type( A, 0, span{} ); + auto Asub = subtensor_type( A, 0, span_type{} ); BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); @@ -164,7 +164,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { auto A = tensor_type{4,3}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::max) ); + auto Asub = subtensor_type( A, span_type(1,2), span_type(1,span_type::max) ); auto B = tensor_type(Asub.getExtents()()); BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); @@ -183,7 +183,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { auto A = tensor_type{4,3,5}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::max), span(2,4) ); + auto Asub = subtensor_type( A, span_type(1,2), span_type(1,span_type::max), span_type(2,4) ); auto B = tensor_type(Asub.getExtents()()); @@ -215,8 +215,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; - // using span = ub::sliced_span; + using subtensor_type = ublas::subtensor; + using span_type = typename subtensor_type::span_type; diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index c1fab9fa1..5d55a032b 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -26,20 +26,21 @@ BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) struct fixture_sliced_span { - using span_type = boost::numeric::ublas::sliced_span; - - fixture_sliced_span() - : spans{ - span_type(), // 0, a(:) - span_type(0,0), // 1, a(0:0) - span_type(0,2), // 2, a(0:2) - span_type(1,1), // 3, a(1:1) - span_type(1,3), // 4, a(1:3) - span_type(1,boost::numeric::ublas::max), // 5, a(1:end) - span_type(boost::numeric::ublas::max) // 6, a(end) - } - {} - std::vector spans; + using span_type = boost::numeric::ublas::sspan; + + fixture_sliced_span() + : spans{ + span_type(), // 0, a(:) + span_type(0,0), // 1, a(0:0) + span_type(0,2), // 2, a(0:2) + span_type(1,1), // 3, a(1:1) + span_type(1,3), // 4, a(1:3) + span_type(1,max), // 5, a(1:end) + span_type(max) // 6, a(end) +} + {} + std::vector spans; + static inline constexpr auto max = span_type::max; }; @@ -49,88 +50,88 @@ BOOST_FIXTURE_TEST_CASE( transform_sliced_span_test, fixture_sliced_span ) namespace ublas = boost::numeric::ublas; // template - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::sliced_span(0,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::sliced_span(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::sliced_span(0,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(0), std::size_t(2) ) == ublas::sspan(0,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(0), std::size_t(3) ) == ublas::sspan(0,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(0), std::size_t(4) ) == ublas::sspan(0,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::sliced_span(0,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::sliced_span(0,0) ); - BOOST_CHECK( 
ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::sliced_span(0,0) ); + BOOST_CHECK( ublas::detail::transform(spans.at(1), std::size_t(3) ) == ublas::sspan(0,0) ); + BOOST_CHECK( ublas::detail::transform(spans.at(1), std::size_t(2) ) == ublas::sspan(0,0) ); + BOOST_CHECK( ublas::detail::transform(spans.at(1), std::size_t(4) ) == ublas::sspan(0,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::sliced_span(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::sliced_span(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::sliced_span(0,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(2), std::size_t(3) ) == ublas::sspan(0,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(2), std::size_t(4) ) == ublas::sspan(0,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(2), std::size_t(5) ) == ublas::sspan(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::sliced_span(1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::sliced_span(1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::sliced_span(1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(3), std::size_t(2) ) == ublas::sspan(1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(3), std::size_t(3) ) == ublas::sspan(1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(3), std::size_t(4) ) == ublas::sspan(1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::sliced_span(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::sliced_span(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(4), std::size_t(4) ) == ublas::sspan(1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(4), std::size_t(5) ) == ublas::sspan(1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(4), std::size_t(6) ) == ublas::sspan(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::sliced_span(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::sliced_span(1,4) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::sliced_span(1,5) ); + BOOST_CHECK( ublas::detail::transform(spans.at(5), std::size_t(4) ) == ublas::sspan(1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(5), std::size_t(5) ) == ublas::sspan(1,4) ); + BOOST_CHECK( ublas::detail::transform(spans.at(5), std::size_t(6) ) == ublas::sspan(1,5) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::sliced_span(3,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::sliced_span(4,4) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::sliced_span(5,5) ); + BOOST_CHECK( ublas::detail::transform(spans.at(6), std::size_t(4) ) == ublas::sspan(3,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(6), std::size_t(5) ) == ublas::sspan(4,4) ); + BOOST_CHECK( ublas::detail::transform(spans.at(6), std::size_t(6) ) == ublas::sspan(5,5) ); } -struct fixture_strided_span { - using span_type = boost::numeric::ublas::strided_span; +struct fixture_sspan { + using span_type = boost::numeric::ublas::sspan; - fixture_strided_span() - : 
spans{ - span_type(), // 0, a(:) - span_type(0,1,0), // 1, a(0:1:0) - span_type(0,2,2), // 2, a(0:2:2) - span_type(1,1,1), // 3, a(1:1:1) - span_type(1,1,3), // 4, a(1:1:3) - span_type(1,2,boost::numeric::ublas::max), // 5, a(1:2:end) - span_type(boost::numeric::ublas::max) // 6, a(end) - } - {} - std::vector spans; + fixture_sspan() + : spans{ + span_type(), // 0, a(:) + span_type(0,1,0), // 1, a(0:1:0) + span_type(0,2,2), // 2, a(0:2:2) + span_type(1,1,1), // 3, a(1:1:1) + span_type(1,1,3), // 4, a(1:1:3) + span_type(1,2,span_type::max), // 5, a(1:2:end) + span_type(span_type::max) // 6, a(end) +} + {} + std::vector spans; }; -BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) +BOOST_FIXTURE_TEST_CASE( transform_sspan_test, fixture_sspan ) { using namespace boost::numeric; // template - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::strided_span(0,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::strided_span(0,1,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::strided_span(0,1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(0), std::size_t(2) ) == ublas::sspan(0,1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(0), std::size_t(3) ) == ublas::sspan(0,1,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(0), std::size_t(4) ) == ublas::sspan(0,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::strided_span(0,1,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::strided_span(0,1,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::strided_span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform(spans.at(1), std::size_t(2) ) == ublas::sspan(0,1,0) ); + BOOST_CHECK( ublas::detail::transform(spans.at(1), std::size_t(3) ) == ublas::sspan(0,1,0) ); + BOOST_CHECK( ublas::detail::transform(spans.at(1), std::size_t(4) ) == ublas::sspan(0,1,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::strided_span(0,2,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::strided_span(0,2,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::strided_span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(2), std::size_t(3) ) == ublas::sspan(0,2,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(2), std::size_t(4) ) == ublas::sspan(0,2,2) ); + BOOST_CHECK( ublas::detail::transform(spans.at(2), std::size_t(5) ) == ublas::sspan(0,2,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::strided_span(1,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::strided_span(1,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::strided_span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(3), std::size_t(2) ) == ublas::sspan(1,1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(3), std::size_t(3) ) == ublas::sspan(1,1,1) ); + BOOST_CHECK( ublas::detail::transform(spans.at(3), std::size_t(4) ) == ublas::sspan(1,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::strided_span(1,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::strided_span(1,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), 
std::size_t(6) ) == ublas::strided_span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(4), std::size_t(4) ) == ublas::sspan(1,1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(4), std::size_t(5) ) == ublas::sspan(1,1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(4), std::size_t(6) ) == ublas::sspan(1,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::strided_span(1,2,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::strided_span(1,2,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::strided_span(1,2,5) ); + BOOST_CHECK( ublas::detail::transform(spans.at(5), std::size_t(4) ) == ublas::sspan(1,2,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(5), std::size_t(5) ) == ublas::sspan(1,2,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(5), std::size_t(6) ) == ublas::sspan(1,2,5) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::strided_span(3,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::strided_span(4,1,4) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::strided_span(5,1,5) ); + BOOST_CHECK( ublas::detail::transform(spans.at(6), std::size_t(4) ) == ublas::sspan(3,1,3) ); + BOOST_CHECK( ublas::detail::transform(spans.at(6), std::size_t(5) ) == ublas::sspan(4,1,4) ); + BOOST_CHECK( ublas::detail::transform(spans.at(6), std::size_t(6) ) == ublas::sspan(5,1,5) ); } @@ -159,90 +160,90 @@ struct fixture_shape { BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) { - namespace ublas = boost::numeric::ublas; - using span = ublas::sliced_span; - - // shape{} - { - auto v = ublas::detail::generate_span_array(extents[0]); - auto r = std::vector{}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - - // shape{1,1} - { - auto v = ublas::detail::generate_span_array(extents[1],span(),span()); - auto r = std::vector{span(0,0),span(0,0)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - // shape{1,1} - { - auto v = ublas::detail::generate_span_array(extents[1],ublas::max,span(ublas::max)); - auto r = std::vector{span(0,0),span(0,0)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - // shape{1,1} - { - auto v = ublas::detail::generate_span_array(extents[1],0,ublas::max); - auto r = std::vector{span(0,0),span(0,0)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - // shape{1,2} - { - auto v = ublas::detail::generate_span_array(extents[2],0,ublas::max); - auto r = std::vector{span(0,0),span(1,1)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - // shape{1,2} - { - auto v = ublas::detail::generate_span_array(extents[2],0,1); - auto r = std::vector{span(0,0),span(1,1)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - { - auto v = ublas::detail::generate_span_array(extents[2],span(),span()); - auto r = std::vector{span(0,0),span(0,1)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - // shape{2,3} - { - auto v = 
ublas::detail::generate_span_array(extents[4],span(),span()); - auto r = std::vector{span(0,1),span(0,2)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - { - auto v = ublas::detail::generate_span_array(extents[4],1,span(1,ublas::max)); - auto r = std::vector{span(1,1),span(1,2)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - // shape{2,3,1} - { - auto v = ublas::detail::generate_span_array(extents[5],span(),span(),0); - auto r = std::vector{span(0,1),span(0,2),span(0,0)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } - - { - auto v = ublas::detail::generate_span_array(extents[5],1,span(),ublas::max); - auto r = std::vector{span(1,1),span(0,2),span(0,0)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } + namespace ublas = boost::numeric::ublas; + using span = ublas::sspan; + + // shape{} + { + auto v = ublas::detail::generate_array(extents[0]); + auto r = std::vector{}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + + // shape{1,1} + { + auto v = ublas::detail::generate_array(extents[1],span(),span()); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,1} + { + auto v = ublas::detail::generate_array(extents[1],span::max,span(span::max)); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,1} + { + auto v = ublas::detail::generate_array(extents[1],0,span::max); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,2} + { + auto v = ublas::detail::generate_array(extents[2],0,span::max); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,2} + { + auto v = ublas::detail::generate_array(extents[2],0,1); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = ublas::detail::generate_array(extents[2],span(),span()); + auto r = std::vector{span(0,0),span(0,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{2,3} + { + auto v = ublas::detail::generate_array(extents[4],span(),span()); + auto r = std::vector{span(0,1),span(0,2)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = ublas::detail::generate_array(extents[4],1,span(1,span::max)); + auto r = std::vector{span(1,1),span(1,2)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{2,3,1} + { + auto v = ublas::detail::generate_array(extents[5],span(),span(),0); + auto r = std::vector{span(0,1),span(0,2),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l 
== r; } ) ); + } + + { + auto v = ublas::detail::generate_array(extents[5],1,span(),span::max); + auto r = std::vector{span(1,1),span(0,2),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } } struct fixture_span_vector_shape { - using shape = boost::numeric::ublas::extents<>; - using span = boost::numeric::ublas::sliced_span; + using shape = boost::numeric::ublas::extents<>; + using span = boost::numeric::ublas::sspan; fixture_span_vector_shape() @@ -255,12 +256,12 @@ struct fixture_span_vector_shape { shape{4,2,3,5} // 5 } , span_vectors_{ - /*A(:)*/ boost::numeric::ublas::detail::generate_span_array(extents_[0]), - /*A(0,0)*/ boost::numeric::ublas::detail::generate_span_array(extents_[1],0,0), - /*A(0,:)*/ boost::numeric::ublas::detail::generate_span_array(extents_[2],0,span()), - /*A(1,1:2)*/ boost::numeric::ublas::detail::generate_span_array(extents_[3],1,span(1,2)), - /*A(1:3,1,1:2)*/ boost::numeric::ublas::detail::generate_span_array(extents_[4],span(1,3),1,span(0,1)), - /*A(1:3,1,0:1,2:4)*/ boost::numeric::ublas::detail::generate_span_array(extents_[5],span(1,3),1,span(0,1),span(2,4)), + /*A(:)*/ boost::numeric::ublas::detail::generate_array(extents_[0]), + /*A(0,0)*/ boost::numeric::ublas::detail::generate_array(extents_[1],0,0), + /*A(0,:)*/ boost::numeric::ublas::detail::generate_array(extents_[2],0,span()), + /*A(1,1:2)*/ boost::numeric::ublas::detail::generate_array(extents_[3],1,span(1,2)), + /*A(1:3,1,1:2)*/ boost::numeric::ublas::detail::generate_array(extents_[4],span(1,3),1,span(0,1)), + /*A(1:3,1,0:1,2:4)*/ boost::numeric::ublas::detail::generate_array(extents_[5],span(1,3),1,span(0,1),span(2,4)), } , reference_ { shape{}, From 50e19e5b8e396ac81848f85392754c6dbf4feb37 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Sun, 11 Dec 2022 23:13:56 +0100 Subject: [PATCH 8/9] fix wrong mtv call from test multiplication. 
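The kernel signature changed from pointer-to-array extents/strides to scalar extents and individual strides, so the old test call no longer matches. Roughly, the corrected call shape used in the updated tests below is

    ublas::detail::recursive::mtv( m,
        na[0], na[1],            // extents of A
        c.data(), 1ul,           // C and its stride
        a.data(), wa[0], wa[1],  // A and its strides
        b.data(), 1ul);          // B and its stride

where na and wa are the extents and strides of A and m is the contraction mode, as in the tests.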
--- .../numeric/ublas/tensor/multiplication.hpp | 2 +- .../test_multiplication_mtv.cpp | 35 ++++++++++++------- test/tensor/test_main.cpp | 24 ------------- 3 files changed, 23 insertions(+), 38 deletions(-) delete mode 100644 test/tensor/test_main.cpp diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index ea7901814..668ac5808 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -344,7 +344,7 @@ void ttv0(SizeType const r, * [n] = size(B(..,:,..)) * * - * @param[in] k if k = 0 + * @param[in] k C[i1] = sum(A[i1,i2] * B[i2]) if k = 1 or C[i2] = sum(A[i1,i2] * B[i1]) if k = 0 * @param[in] m number of rows of A * @param[in] n number of columns of A * @param[out] c pointer to C diff --git a/test/tensor/multiplication/test_multiplication_mtv.cpp b/test/tensor/multiplication/test_multiplication_mtv.cpp index 272407ff4..019b19146 100644 --- a/test/tensor/multiplication/test_multiplication_mtv.cpp +++ b/test/tensor/multiplication/test_multiplication_mtv.cpp @@ -10,13 +10,15 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#include #include "../fixture_utility.hpp" +#include #include #include +#include + BOOST_AUTO_TEST_SUITE(test_multiplication_mtv, - *boost::unit_test::description("Validate Matrix Times Vector") + *boost::unit_test::description("Test Matrix Times Vector") ) @@ -62,11 +64,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_extents_dynamic, auto wc = ublas::to_strides(nc,layout_type{}); auto c = vector_t (ublas::product(nc), value_type{0}); - ublas::detail::recursive::mtv( + ublas::detail::recursive::mtv( m, - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data()); + na[0], + na[1], + c.data(), 1ul, + a.data(), wa[0], wa[1], + b.data(), 1ul); auto v = value_type{static_cast(na[m])}; BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); @@ -123,9 +127,11 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_extents_static_rank, ublas::detail::recursive::mtv( m, - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data()); + na[0], + na[1], + c.data(), 1ul, + a.data(), wa[0], wa[1], + b.data(), 1ul); auto v = value_type{static_cast(na[m])}; BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); @@ -214,11 +220,14 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_extents_static, auto c = std::array >(); std::fill(std::begin(c), std::end(c), value_type{0}); + ublas::detail::recursive::mtv( m, - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data()); + na[0], + na[1], + c.data(), 1ul, + a.data(), wa[0], wa[1], + b.data(), 1ul); auto v = value_type{static_cast(na[m])}; BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); @@ -229,4 +238,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_extents_static, }); } -BOOST_AUTO_TEST_SUITE_END() \ No newline at end of file +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_main.cpp b/test/tensor/test_main.cpp deleted file mode 100644 index fbcd35896..000000000 --- a/test/tensor/test_main.cpp +++ /dev/null @@ -1,24 +0,0 @@ -// -// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - - -// #include -// #include - -#ifndef BOOST_TEST_DYN_LINK -#define BOOST_TEST_DYN_LINK -#endif -// NOLINTNEXTLINE -#define BOOST_TEST_MODULE MainTensor - -#include From 997df315774573a5a3cce8388e08622a4bb8b367 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Sun, 28 May 2023 10:52:00 +0200 Subject: [PATCH 9/9] revert stride computation for vectors. --- .../boost/numeric/ublas/tensor/dynamic_strides.hpp | 4 ++-- .../ublas/tensor/extents/extents_functions.hpp | 11 +++-------- test/tensor/Jamfile | 2 +- test/tensor/algorithm/test_algorithm_trans.cpp | 2 +- .../multiplication/test_multiplication_mtv.cpp | 4 ++-- test/tensor/test_access.cpp | 12 ++++++------ 6 files changed, 15 insertions(+), 20 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp index 45001f179..d22711049 100644 --- a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp +++ b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp @@ -83,8 +83,8 @@ class basic_strides if( !valid(n) ) throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : shape is not valid."); -// if( is_vector(s) || is_scalar(s) ) /* */ -// return; + if( is_vector(n) || is_scalar(n) ) /* */ + return; const auto p = this->size(); diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp index cfa247b4d..5656d5777 100644 --- a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -184,11 +184,9 @@ template { auto s = typename extents_core::base_type(e.size(),1ul); - if(empty(e) || is_scalar(e)){ + if(empty(e) || is_scalar(e) || is_vector(e)){ return s; - } - - // || is_vector(e) + } if constexpr(std::is_same_v){ std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); @@ -204,13 +202,10 @@ template auto s = typename extents_core::base_type{}; std::fill(s.begin(),s.end(),1ul); - if(empty(e) || is_scalar(e)){ + if(empty(e) || is_scalar(e) || is_vector(e)){ return s; } - - // || is_vector(e) - if constexpr(std::is_same_v){ std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); } else { diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 73430ad23..1250d6456 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -90,7 +90,7 @@ test-suite boost-ublas-tensor-test # test_access.cpp test_einstein_notation.cpp - test_main.cpp + # test_main.cpp test_multi_index.cpp test_multi_index_utility.cpp test_span.cpp diff --git a/test/tensor/algorithm/test_algorithm_trans.cpp b/test/tensor/algorithm/test_algorithm_trans.cpp index 8cda5a307..7e4c602eb 100644 --- a/test/tensor/algorithm/test_algorithm_trans.cpp +++ b/test/tensor/algorithm/test_algorithm_trans.cpp @@ -219,7 +219,7 @@ constexpr auto generate_permuated_extents() noexcept{ constexpr auto sz = ublas::size_v; - constexpr auto helper = [](std::index_sequence ids){ + constexpr auto helper = [](std::index_sequence /*ids*/){ constexpr auto helper1 = [](){ std::array pi; (( pi[sz - Is - 1ul] = ublas::get_v ),...); diff --git a/test/tensor/multiplication/test_multiplication_mtv.cpp b/test/tensor/multiplication/test_multiplication_mtv.cpp index 019b19146..a574a1e2c 
100644 --- a/test/tensor/multiplication/test_multiplication_mtv.cpp +++ b/test/tensor/multiplication/test_multiplication_mtv.cpp @@ -214,9 +214,9 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_extents_static, std::fill(std::begin(b), std::end(b), value_type{1}); using nc_type = decltype( generate_result_extents() ); - auto nc = ublas::to_array_v; + //auto nc = ublas::to_array_v; // FIXME: use strides_v after the fix - auto wc = get_strides(nc); + // auto wc = get_strides(nc); auto c = std::array >(); std::fill(std::begin(c), std::end(c), value_type{0}); diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index 35fcd5938..983fb7987 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -238,12 +238,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_ty auto const& ii = iref[kk]; auto i = multi_index_t(w.size()); ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); -// std::cout << "j= " << jj << std::endl; -// std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << "ii_ref = [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; -// std::cout << std::endl; + std::cout << "j= " << jj << std::endl; + std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "ii_ref = [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << std::endl; BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; } }
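For reference, a minimal stand-alone sketch of the semantics the reworked detail::recursive::mtv kernel implements (k selects which index of A is contracted: k=1 gives c[i] = sum_j A[i,j]*b[j], k=0 gives c[j] = sum_i A[i,j]*b[i]). It mirrors the loop structure of the patched kernel but is not the library code; the column-major example data in main is purely illustrative.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // k==1 : c[i] = sum_j a[i,j] * b[j]   (contract the column index)
    // k==0 : c[j] = sum_i a[i,j] * b[i]   (contract the row index)
    template<class T>
    void mtv_sketch(std::size_t k, std::size_t m, std::size_t n,
                    T* c, std::size_t wc,
                    T const* a, std::size_t wa_m, std::size_t wa_n,
                    T const* b, std::size_t wb)
    {
      auto const wa_x = k==0 ? wa_n : wa_m;   // stride of the free index of A
      auto const wa_y = k==0 ? wa_m : wa_n;   // stride of the contracted index of A
      auto const x    = k==0 ? n : m;         // length of the free index
      auto const y    = k==0 ? m : n;         // length of the contracted index
      for(std::size_t ix = 0; ix < x; ++ix, c += wc, a += wa_x) {
        auto c1 = c; auto a1 = a; auto b1 = b;
        for(std::size_t iy = 0; iy < y; ++iy, a1 += wa_y, b1 += wb)
          *c1 += *a1 * *b1;
      }
    }

    int main()
    {
      // A is a 2x3 matrix stored in column-major (first-order) layout: wa_m = 1, wa_n = 2.
      std::size_t const m = 2, n = 3;
      std::vector<double> A = {1,4, 2,5, 3,6};       // A = [[1,2,3],[4,5,6]]
      std::vector<double> b = {1,1,1}, c(m, 0.0);
      mtv_sketch<double>(1, m, n, c.data(), 1, A.data(), 1, m, b.data(), 1);
      std::cout << c[0] << " " << c[1] << "\n";      // prints 6 15 (row sums of A)
    }

Passing the extents and strides as scalars rather than as pointers to arrays is what lets the same kernel serve both the k=0 and k=1 orientations and both storage layouts; per the updated ttv documentation above, the p == 2 case is expected to go through this kind of mtv/vtm kernel rather than the generic recursive ttv.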