From db29cdf76cdd4c3732164c99e9a22e9e02bf994f Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 24 May 2021 12:00:38 +0200 Subject: [PATCH 01/40] Features/tensor refactor (#115) --- .appveyor.yml | 74 -- .clang-tidy | 8 +- .github/workflows/address_san.yml | 75 -- .../workflows/{apple_clang.yml => apple.yml} | 41 +- .../{clangtidy_review.yml => clangtidy.yml} | 51 +- .github/workflows/clangtidy_check.yml | 47 - .github/workflows/code_format.yml | 12 +- .github/workflows/linux.yml | 99 ++ .github/workflows/linux_clang.yml | 97 -- .github/workflows/linux_gcc.yml | 99 -- .github/workflows/sanitizer.yml | 83 ++ .github/workflows/thread_san.yml | 76 -- .github/workflows/ub_san.yml | 76 -- .../{windows_msvc.yml => windows.yml} | 29 +- .travis.yml | 90 -- IDEs/qtcreator/examples/configuration.pri | 27 +- .../examples/tensor/example_access_tensor.pro | 7 - .../tensor/example_instantiate_tensor.pro | 7 - ...ple_multiply_tensors_einstein_notation.pro | 7 - ...mple_multiply_tensors_product_function.pro | 7 - .../tensor/example_simple_expressions.pro | 7 - IDEs/qtcreator/examples/tensor/tensor.pro | 25 +- IDEs/qtcreator/include/include.pro | 6 +- IDEs/qtcreator/include/tensor/tensor.pri | 58 +- IDEs/qtcreator/test/test_tensor.pro | 60 +- IDEs/qtcreator/tests.pri | 140 +-- IDEs/qtcreator/ublas_develop.pro | 4 +- README.md | 77 +- examples/tensor/.clang-tidy | 11 - examples/tensor/access_tensor.cpp | 179 +-- examples/tensor/instantiate_tensor.cpp | 108 +- .../multiply_tensors_einstein_notation.cpp | 277 +++-- .../multiply_tensors_product_function.cpp | 176 +-- examples/tensor/simple_expressions.cpp | 32 +- include/boost/numeric/ublas/tensor.hpp | 3 +- .../boost/numeric/ublas/tensor/algorithms.hpp | 357 +++--- .../boost/numeric/ublas/tensor/concepts.hpp | 34 + .../numeric/ublas/tensor/dynamic_extents.hpp | 241 ---- .../numeric/ublas/tensor/dynamic_strides.hpp | 219 ---- .../boost/numeric/ublas/tensor/expression.hpp | 48 +- .../ublas/tensor/expression_evaluation.hpp | 98 +- 
.../boost/numeric/ublas/tensor/extents.hpp | 53 + .../ublas/tensor/extents/extents_base.hpp | 54 + .../tensor/extents/extents_dynamic_size.hpp | 154 +++ .../tensor/extents/extents_functions.hpp | 247 ++++ .../ublas/tensor/extents/extents_static.hpp | 78 ++ .../extents/extents_static_functions.hpp | 637 ++++++++++ .../tensor/extents/extents_static_size.hpp | 148 +++ .../ublas/tensor/extents_functions.hpp | 449 ------- .../ublas/tensor/fixed_rank_extents.hpp | 248 ---- .../ublas/tensor/fixed_rank_strides.hpp | 180 ++- .../numeric/ublas/tensor/function/conj.hpp | 81 ++ .../numeric/ublas/tensor/function/imag.hpp | 82 ++ .../numeric/ublas/tensor/function/init.hpp | 120 ++ .../ublas/tensor/function/inner_prod.hpp | 68 + .../numeric/ublas/tensor/function/norm.hpp | 60 + .../ublas/tensor/function/outer_prod.hpp | 283 +++++ .../numeric/ublas/tensor/function/real.hpp | 80 ++ .../numeric/ublas/tensor/function/reshape.hpp | 87 ++ .../tensor/function/tensor_times_matrix.hpp | 256 ++++ .../tensor/function/tensor_times_tensor.hpp | 337 +++++ .../tensor/function/tensor_times_vector.hpp | 240 ++++ .../numeric/ublas/tensor/function/trans.hpp | 78 ++ .../boost/numeric/ublas/tensor/functions.hpp | 1108 +---------------- include/boost/numeric/ublas/tensor/index.hpp | 14 +- .../numeric/ublas/tensor/index_functions.hpp | 64 + .../numeric/ublas/tensor/multi_index.hpp | 66 +- .../ublas/tensor/multi_index_utility.hpp | 136 +- .../numeric/ublas/tensor/multiplication.hpp | 771 ++++++------ .../ublas/tensor/operators_arithmetic.hpp | 365 +++--- .../ublas/tensor/operators_comparison.hpp | 51 +- .../boost/numeric/ublas/tensor/ostream.hpp | 144 +-- .../numeric/ublas/tensor/static_extents.hpp | 150 --- .../numeric/ublas/tensor/static_strides.hpp | 267 ---- .../boost/numeric/ublas/tensor/strides.hpp | 99 -- include/boost/numeric/ublas/tensor/tags.hpp | 14 +- include/boost/numeric/ublas/tensor/tensor.hpp | 53 +- .../ublas/tensor/tensor/tensor_core.hpp | 27 + 
.../ublas/tensor/tensor/tensor_dynamic.hpp | 466 +++++++ .../ublas/tensor/tensor/tensor_engine.hpp | 29 + .../ublas/tensor/tensor/tensor_static.hpp | 456 +++++++ .../tensor/tensor/tensor_static_rank.hpp | 473 +++++++ .../numeric/ublas/tensor/tensor_core.hpp | 886 ------------- .../numeric/ublas/tensor/tensor_engine.hpp | 50 - .../ublas/tensor/traits/basic_type_traits.hpp | 53 +- .../ublas/tensor/traits/storage_traits.hpp | 105 +- .../traits/type_traits_dynamic_extents.hpp | 46 - .../traits/type_traits_dynamic_strides.hpp | 47 - .../tensor/traits/type_traits_extents.hpp | 41 - .../traits/type_traits_fixed_rank_extents.hpp | 45 - .../traits/type_traits_fixed_rank_strides.hpp | 46 - .../traits/type_traits_static_extents.hpp | 37 - .../traits/type_traits_static_strides.hpp | 46 - .../tensor/traits/type_traits_strides.hpp | 44 - .../tensor/traits/type_traits_tensor.hpp | 41 - .../numeric/ublas/tensor/type_traits.hpp | 11 +- test/tensor/Jamfile | 54 +- test/tensor/test_algorithms.cpp | 886 ++++++------- test/tensor/test_einstein_notation.cpp | 168 +-- test/tensor/test_expression.cpp | 159 +-- test/tensor/test_expression_evaluation.cpp | 156 ++- test/tensor/test_extents.cpp | 731 ----------- test/tensor/test_extents_dynamic.cpp | 190 +++ .../test_extents_dynamic_rank_static.cpp | 155 +++ test/tensor/test_extents_functions.cpp | 634 ++++++++++ .../test_fixed_rank_expression_evaluation.cpp | 349 +++--- test/tensor/test_fixed_rank_extents.cpp | 1027 ++++++++------- test/tensor/test_fixed_rank_functions.cpp | 679 +++++----- .../test_fixed_rank_operators_arithmetic.cpp | 277 ++--- .../test_fixed_rank_operators_comparison.cpp | 268 ++-- test/tensor/test_fixed_rank_strides.cpp | 281 +++-- test/tensor/test_fixed_rank_tensor.cpp | 759 +++++------ .../test_fixed_rank_tensor_matrix_vector.cpp | 637 +++++----- test/tensor/test_functions.cpp | 146 ++- test/tensor/test_multi_index.cpp | 107 +- test/tensor/test_multi_index_utility.cpp | 396 +++--- test/tensor/test_multiplication.cpp | 
595 +++++---- test/tensor/test_operators_arithmetic.cpp | 48 +- test/tensor/test_operators_comparison.cpp | 394 +++--- .../test_static_expression_evaluation.cpp | 64 +- test/tensor/test_static_extents.cpp | 691 +++++----- .../test_static_operators_arithmetic.cpp | 34 +- .../test_static_operators_comparison.cpp | 35 +- test/tensor/test_static_strides.cpp | 251 ++-- test/tensor/test_static_tensor.cpp | 152 ++- .../test_static_tensor_matrix_vector.cpp | 415 +++--- test/tensor/test_strides.cpp | 252 ++-- test/tensor/test_tensor.cpp | 124 +- test/tensor/test_tensor_matrix_vector.cpp | 320 ++--- test/tensor/utility.hpp | 67 +- 130 files changed, 12540 insertions(+), 12404 deletions(-) delete mode 100644 .appveyor.yml delete mode 100644 .github/workflows/address_san.yml rename .github/workflows/{apple_clang.yml => apple.yml} (69%) rename .github/workflows/{clangtidy_review.yml => clangtidy.yml} (53%) delete mode 100644 .github/workflows/clangtidy_check.yml create mode 100644 .github/workflows/linux.yml delete mode 100644 .github/workflows/linux_clang.yml delete mode 100644 .github/workflows/linux_gcc.yml create mode 100644 .github/workflows/sanitizer.yml delete mode 100644 .github/workflows/thread_san.yml delete mode 100644 .github/workflows/ub_san.yml rename .github/workflows/{windows_msvc.yml => windows.yml} (77%) delete mode 100644 .travis.yml delete mode 100644 IDEs/qtcreator/examples/tensor/example_access_tensor.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro delete mode 100644 IDEs/qtcreator/examples/tensor/example_simple_expressions.pro delete mode 100644 examples/tensor/.clang-tidy create mode 100644 include/boost/numeric/ublas/tensor/concepts.hpp delete mode 100644 include/boost/numeric/ublas/tensor/dynamic_extents.hpp delete mode 100644 
include/boost/numeric/ublas/tensor/dynamic_strides.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_base.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_functions.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_static.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp create mode 100644 include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp delete mode 100644 include/boost/numeric/ublas/tensor/extents_functions.hpp delete mode 100644 include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/conj.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/imag.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/init.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/inner_prod.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/norm.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/outer_prod.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/real.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/reshape.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp create mode 100644 include/boost/numeric/ublas/tensor/function/trans.hpp create mode 100644 include/boost/numeric/ublas/tensor/index_functions.hpp delete mode 100644 include/boost/numeric/ublas/tensor/static_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/static_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/strides.hpp create 
mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp delete mode 100644 include/boost/numeric/ublas/tensor/tensor_core.hpp delete mode 100644 include/boost/numeric/ublas/tensor/tensor_engine.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp delete mode 100644 test/tensor/test_extents.cpp create mode 100644 test/tensor/test_extents_dynamic.cpp create mode 100644 test/tensor/test_extents_dynamic_rank_static.cpp create mode 100644 test/tensor/test_extents_functions.cpp diff --git a/.appveyor.yml b/.appveyor.yml deleted file mode 100644 index 2b2d8fd53..000000000 --- a/.appveyor.yml +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2018 Stefan Seefeld -# Distributed under the Boost Software License, Version 1.0. 
-# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -version: 1.0.{build}-{branch} - -shallow_clone: true - -branches: - only: - - master - - develop - - /feature\/.*/ - -environment: - matrix: - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - TOOLSET: msvc-14.2 - CXXSTD: latest - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 - TOOLSET: msvc-14.2 - CXXSTD: 17 - - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 - TOOLSET: msvc-14.0 - CXXSTD: 11 - -install: - - cd "C:\Tools\vcpkg" - - git pull - - .\bootstrap-vcpkg.bat - - cd %appveyor_build_folder% - # Install OpenCL runtime (driver) for Intel / Xeon package - - appveyor DownloadFile "http://registrationcenter-download.intel.com/akdlm/irc_nas/9022/opencl_runtime_16.1.1_x64_setup.msi" - - start /wait msiexec /i opencl_runtime_16.1.1_x64_setup.msi /qn /l*v msiexec2.log - # FIXME: To be removed https://help.appveyor.com/discussions/problems/13000-cmake_toolchain_filevcpkgcmake-conflicts-with-cmake-native-findboostcmake" - - ps: 'Write-Host "Installing latest vcpkg.cmake module" -ForegroundColor Magenta' - - appveyor DownloadFile https://raw.githubusercontent.com/Microsoft/vcpkg/master/scripts/buildsystems/vcpkg.cmake -FileName "c:\tools\vcpkg\scripts\buildsystems\vcpkg.cmake" - - set "TRIPLET=x64-windows" - - vcpkg --triplet %TRIPLET% install opencl clblas - - set PATH=C:\Tools\vcpkg\installed\%TRIPLET%\bin;%PATH% - - set VCPKG_I=C:\Tools\vcpkg\installed\%TRIPLET%\include - - set VCPKG_L=C:\Tools\vcpkg\installed\%TRIPLET%\lib - - set BOOST_BRANCH=develop - - if "%APPVEYOR_REPO_BRANCH%" == "master" set BOOST_BRANCH=master - - cd .. 
- - git clone -b %BOOST_BRANCH% https://github.com/boostorg/boost.git boost-root - - cd boost-root - - git submodule update --init tools/build - - git submodule update --init libs/config - - git submodule update --init tools/boostdep - - xcopy /s /e /q %APPVEYOR_BUILD_FOLDER% libs\numeric\ublas - - python tools/boostdep/depinst/depinst.py -I benchmarks numeric/ublas - - xcopy %APPVEYOR_BUILD_FOLDER%\opencl.jam %USERPROFILE% - - xcopy %APPVEYOR_BUILD_FOLDER%\clblas.jam %USERPROFILE% - - ps: | - # Creating %USERPROFILE%/user-config.jam file - @' - import os regex toolset ; - local toolset = [ regex.split [ os.environ TOOLSET ] "-" ] ; - local vcpkg_i = [ os.environ VCPKG_I ] ; - local vcpkg_l = [ os.environ VCPKG_L ] ; - using $(toolset[1]) : $(toolset[2-]:J="-") : ; - using opencl : : $(vcpkg_i) $(vcpkg_l) ; - using clblas : : $(vcpkg_i) $(vcpkg_l) ; - '@ | sc "$env:USERPROFILE/user-config.jam" - - cmd /c bootstrap - - b2 -j3 headers - -build: off - -test_script: - - if not "%CXXSTD%" == "" set CXXSTD=cxxstd=%CXXSTD% - - set ADDRMD=address-model=64 - - b2 -j3 libs/numeric/ublas/test toolset=%TOOLSET% %CXXSTD% %ADDRMD% \ No newline at end of file diff --git a/.clang-tidy b/.clang-tidy index e17df9a0e..4a0edffcd 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,6 +1,6 @@ --- -Checks: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements' -WarningsAsErrors: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements' +Checks: '-*,clang-*,bugprone-*,cppcoreguidelines-*,google-*,hicpp-*,modernize-*,performance-*,readability-*,portability-*, 
+-modernize-use-trailing-return-type, -readability-uppercase-literal-suffix, -readability-braces-around-statements, -hicpp-uppercase-literal-suffix, -hicpp-braces-around-statements, -hicpp-no-array-decay, -cppcoreguidelines-pro-bounds-constant-array-index, -cppcoreguidelines-pro-bounds-pointer-arithmetic, -cppcoreguidelines-pro-bounds-array-to-pointer-decay, -readability-avoid-const-params-in-decls, -google-readability-braces-around-statements,-google-explicit-constructor,-hicpp-vararg,-cppcoreguidelines-pro-type-vararg, -cppcoreguidelines-avoid-non-const-global-variables, -google-readability-todo, -cppcoreguidelines-pro-type-member-init, -hicpp-member-init, -cppcoreguidelines-special-member-functions, -hicpp-special-member-functions' HeaderFilterRegex: 'boost\/numeric\/ublas\/tensor\/.*' AnalyzeTemporaryDtors: false FormatStyle: file @@ -8,4 +8,8 @@ User: ublas-developers CheckOptions: - key: modernize-use-nullptr.NullMacros value: 'NULL' + - key: readability-magic-numbers.IgnoredIntegerValues + value: '1;2;3;4;5;6;7;8;9;' + - key: cppcoreguidelines-avoid-magic-numbers.IgnoredIntegerValues + value: '1;2;3;4;5;6;7;8;9;' ... diff --git a/.github/workflows/address_san.yml b/.github/workflows/address_san.yml deleted file mode 100644 index 5cf0c85db..000000000 --- a/.github/workflows/address_san.yml +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Address Sanitizer -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. 
-on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-9, cxx: g++-9, cxxstd: 2a, name: gcc} - - {cc: clang-9, cxx: clang++-9, cxxstd: 2a, name: clang} - steps: - - uses: actions/checkout@v2 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> $GITHUB_ENV - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - echo "using ${{matrix.config.name}} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=address -fno-omit-frame-pointer" linkflags="-fsanitize=address" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=address 
-fno-omit-frame-pointer" linkflags="-fsanitize=address" diff --git a/.github/workflows/apple_clang.yml b/.github/workflows/apple.yml similarity index 69% rename from .github/workflows/apple_clang.yml rename to .github/workflows/apple.yml index dfaa5d470..f0c8df792 100644 --- a/.github/workflows/apple_clang.yml +++ b/.github/workflows/apple.yml @@ -1,8 +1,9 @@ # Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: Apple Clang +name: "Apple MacOS" on: push: @@ -15,24 +16,26 @@ on: - 'doc/**' jobs: build: - name: "Darwin 11.0 -std=c++${{matrix.cxxstd}}" - runs-on: macos-latest + name: "MacOS 10.15 clang -std=c++${{matrix.cxxstd}}" + runs-on: macos-10.15 strategy: - fail-fast: false + fail-fast: true matrix: - cxxstd: [11, 17, 2a] + cxxstd: [2a] steps: - uses: actions/checkout@v2 - - name: Prepare BOOST_ROOT + - name: Git Clone Boost.uBlas run: | cd ${GITHUB_WORKSPACE} cd .. git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root cd boost-root - + echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} + echo $BOOST_ROOT + git submodule update --init --depth=1 --jobs 8 tools/build git submodule update --init --depth=1 --jobs 8 libs/config git submodule update --init --depth=1 --jobs 8 tools/boostdep @@ -41,33 +44,29 @@ jobs: cp -rp ${GITHUB_WORKSPACE}/. 
libs/numeric/ublas python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build + - name: Bootstrap and Compile Boost.uBlas run: | - echo $BOOST_ROOT cd $BOOST_ROOT - - echo "using clang : : clang++ ;" >> ~/user-config.jam; - + ./bootstrap.sh + ./b2 -j8 headers + echo "using clang : : $(brew --prefix llvm)/bin/clang++ ;" >> ~/user-config.jam; + + - name: Test Benchmarks run: | cd $BOOST_ROOT cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=clang cxxstd=${{matrix.cxxstd}} + $BOOST_ROOT/b2 -j8 benchmarks cxxstd=${{matrix.cxxstd}} - name: Test Tensor Examples run: | cd $BOOST_ROOT cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=clang cxxstd=${{matrix.cxxstd}} + $BOOST_ROOT/b2 -j8 examples/tensor cxxstd=${{matrix.cxxstd}} - name: Test Tensor run: | cd $BOOST_ROOT cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=clang cxxstd=${{matrix.cxxstd}} + $BOOST_ROOT/b2 -j8 test/tensor cxxstd=${{matrix.cxxstd}} + diff --git a/.github/workflows/clangtidy_review.yml b/.github/workflows/clangtidy.yml similarity index 53% rename from .github/workflows/clangtidy_review.yml rename to .github/workflows/clangtidy.yml index b14c5bb9f..11b1aea41 100644 --- a/.github/workflows/clangtidy_review.yml +++ b/.github/workflows/clangtidy.yml @@ -1,26 +1,30 @@ # Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: Clang tidy Review - -on: [pull_request] - +name: "Static Analysis" + +on: + push: + paths-ignore: + - '**.md' + - 'doc/**' + pull_request: + paths-ignore: + - '**.md' + - 'doc/**' jobs: check: - name: Clang tidy Review + name: Linux Clang-Tidy runs-on: ubuntu-20.04 - if: 
github.event.pull_request.head.repo.full_name == github.repository steps: - - uses: actions/checkout@v2 - - name: "Install dependencies" - run: | - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key 2>/dev/null | sudo apt-key add - - sudo add-apt-repository 'deb http://apt.llvm.org/focal llvm-toolchain-focal-10 main' -y - sudo apt-get update -q - sudo apt-get install -y clang-10 clang-tidy-10 + - uses: actions/checkout@v2 + + - name: Install Clang 11 + run: sudo apt-get update && sudo apt-get install -y clang-11 clang-tidy-11 - - name: "Install Boost from Source" + - name: "Install Boost from Source" run: | cd .. git clone --depth 1 https://github.com/boostorg/boost.git --recursive --shallow-submodules @@ -29,19 +33,20 @@ jobs: ./b2 headers sudo cp -r -L boost /usr/include rm -rf boost + + - name: "Run Clang-Tidy" + run: clang-tidy-11 examples/tensor/*.cpp test/tensor/*.cpp -- -Iinclude -std=c++20 > reports.txt + - name: "Print Clang-Tidy Report" + run: cat reports.txt + - uses: actions/setup-python@v2 - - - name: "Run clang-tidy check" - run: | - clang-tidy-10 examples/tensor/*.cpp -- -Iinclude -std=c++17 > reports.txt - + - name: "Post review comments" - if: always() + if: github.event.pull_request.head.repo.full_name == github.repository run: | pip3 install 'unidiff~=0.6.0' --force-reinstall pip3 install 'PyGithub~=1.51' --force-reinstall pip3 install 'requests~=2.23' --force-reinstall - - python ./.ci/post_review.py --repository ${{ github.repository }} --token ${{ github.token}} --pr ${{ github.event.pull_request.number }} --path reports.txt - + python ./.ci/post_review.py --repository ${{ github.repository }} --token ${{ github.token}} --pr ${{ github.event.pull_request.number }} --path reports.txt + diff --git a/.github/workflows/clangtidy_check.yml b/.github/workflows/clangtidy_check.yml deleted file mode 100644 index 1b5236352..000000000 --- a/.github/workflows/clangtidy_check.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan 
-# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Clang tidy checks - -on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - check: - name: Clang tidy Check - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - - name: "Install dependencies" - run: | - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key 2>/dev/null | sudo apt-key add - - sudo add-apt-repository 'deb http://apt.llvm.org/focal llvm-toolchain-focal-10 main' -y - sudo apt-get update -q - sudo apt-get install -y clang-10 clang-tidy-10 - - - name: "Install Boost from Source" - run: | - cd .. - git clone --depth 1 https://github.com/boostorg/boost.git --recursive --shallow-submodules - cd boost - ./bootstrap.sh - ./b2 headers - sudo cp -r -L boost /usr/include - rm -rf boost - - - name: "Run clang-tidy check" - run: | - clang-tidy-10 examples/tensor/*.cpp -- -Iinclude -std=c++17 > reports.txt - - - name: "Show clang tidy report" - if: always() - run: | - cat reports.txt diff --git a/.github/workflows/code_format.yml b/.github/workflows/code_format.yml index f1ba49372..a7c53648b 100644 --- a/.github/workflows/code_format.yml +++ b/.github/workflows/code_format.yml @@ -2,7 +2,7 @@ # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: "Code Format" +name: "Clang Code Format" on: push: @@ -15,7 +15,7 @@ on: - 'doc/**' jobs: format: - name: "Code Formatting Check" + name: "Clang Code Formatting Check" runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v2 @@ -24,8 +24,8 @@ jobs: run: | sudo apt-get install -y clang-format-10 - - name: "Format Codes" - run: clang-format-10 -i examples/tensor/*.cpp test/tensor/*.cpp include/boost/numeric/ublas/tensor/*.hpp include/boost/numeric/ublas/tensor/*/*.hpp +# - name: "Format Codes" +# run: 
clang-format-10 -i examples/tensor/*.cpp test/tensor/*.cpp include/boost/numeric/ublas/tensor/*.hpp include/boost/numeric/ublas/tensor/*/*.hpp - - name: Check diff - run: git diff --exit-code HEAD +# - name: Check diff +# run: git diff --exit-code HEAD diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml new file mode 100644 index 000000000..20b9c5311 --- /dev/null +++ b/.github/workflows/linux.yml @@ -0,0 +1,99 @@ +# Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy +# Distributed under Boost Software License, Version 1.0 +# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) + +name: "Linux" + +on: + push: + paths-ignore: + - '**.md' + - 'doc/**' + pull_request: + paths-ignore: + - '**.md' + - 'doc/**' + +jobs: + build: + name: Ubuntu 20.04 "cxx=${{matrix.config.cxx}}, std=c++${{matrix.config.cxxstd}}, variant=c++${{matrix.config.variant}}" + runs-on: ubuntu-20.04 + strategy: + fail-fast: true + # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early + matrix: + config: + - { name: clang, cc: clang-10, cxx: clang++-10, cxxstd: 20, variant: debug, opt: off} + - { name: clang, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off} + - { name: clang, cc: clang-10, cxx: clang++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp, ldflags: -lgomp=libomp5} + - { name: clang, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp, ldflags: -lgomp=libomp5} + - { name: gcc, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: debug, opt: off} + - { name: gcc, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp, ldflags: -lgomp} + + steps: + - uses: actions/checkout@v2 + + - name: Install g++-10 + if: matrix.config.cxx == 'g++-10' + run: sudo apt update && sudo apt-get install -y g++-10 libomp-dev + + - name: Install Clang 10 + if: matrix.config.cxx == 'clang++-10' + 
run: sudo apt-get update && sudo apt-get install -y clang-10 + + - name: Install Clang 11 + if: matrix.config.cxx == 'clang++-11' + run: sudo apt-get update && sudo apt-get install -y clang-11 + + - name: Git Clone Boost.uBlas + run: | + cd ${GITHUB_WORKSPACE} + cd .. + + git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root + cd boost-root + echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} + echo $BOOST_ROOT + + git submodule update --init --depth=1 --jobs 8 tools/build + git submodule update --init --depth=1 --jobs 8 libs/config + git submodule update --init --depth=1 --jobs 8 tools/boostdep + + mkdir -p libs/numeric/ + cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas + python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas + + - name: Bootstrap Boost and Compile Boost + run: | + cd $BOOST_ROOT + ./bootstrap.sh + ./b2 -j8 headers + echo "using ${{ matrix.config.name }} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; + +# - name: Test Benchmarks +# run: | +# cd $BOOST_ROOT +# cd libs/numeric/ublas +# $BOOST_ROOT/b2 -j 4 benchmarks toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" + + - name: Test Tensor Examples + run: | + cd $BOOST_ROOT/libs/numeric/ublas + if [ -z "$cxxflags" ] + then + $BOOST_ROOT/b2 -j8 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} + else + $BOOST_ROOT/b2 -j8 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + fi + + - name: Test Tensor Unit-Tests + run: | + cd $BOOST_ROOT/libs/numeric/ublas + if [ -z "$cxxflags" ] + then + $BOOST_ROOT/b2 -j8 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} + else + 
$BOOST_ROOT/b2 -j8 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + fi + diff --git a/.github/workflows/linux_clang.yml b/.github/workflows/linux_clang.yml deleted file mode 100644 index e03dfcc1a..000000000 --- a/.github/workflows/linux_clang.yml +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Linux Clang Release -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. -on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' - -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - { cc: clang-6.0, cxx: clang++-6.0, cxxstd: 11} - - { cc: clang-8, cxx: clang++-8, cxxstd: 11} - - { cc: clang-9, cxx: clang++-9, cxxstd: 11} - - { cc: clang-10, cxx: clang++-10, cxxstd: 11} - - { cc: clang-6.0, cxx: clang++-6.0, cxxstd: 17} - - { cc: clang-8, cxx: clang++-8, cxxstd: 17} - - { cc: clang-9, cxx: clang++-9, cxxstd: 17} - - { cc: clang-10, cxx: clang++-10, cxxstd: 17} - - { cc: clang-10, cxx: clang++-10, cxxstd: 2a} - - steps: - - uses: actions/checkout@v2 - - - name: Install Clang 10 - if: matrix.config.cxx == 'clang++-10' - run: | - sudo apt-get update - sudo apt-get install -y clang-10 - - - name: Install Clang 6 - if: matrix.config.cxx == 'clang++-6.0' - run: | - sudo apt-get update - sudo apt-get install -y clang-6.0 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. 
- - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using clang : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=clang cxxstd=${{matrix.config.cxxstd}} cxxflags="-O3" diff --git a/.github/workflows/linux_gcc.yml b/.github/workflows/linux_gcc.yml deleted file mode 100644 index 3fa47504e..000000000 --- a/.github/workflows/linux_gcc.yml +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan, Cem Bassoy -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Linux GCC Debug -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. 
-on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-7, cxx: g++-7, cxxstd: 11} - - {cc: gcc-8, cxx: g++-8, cxxstd: 11} - - {cc: gcc-9, cxx: g++-9, cxxstd: 11} - - {cc: gcc-10, cxx: g++-10, cxxstd: 11} - - {cc: gcc-7, cxx: g++-7, cxxstd: 17} - - {cc: gcc-8, cxx: g++-8, cxxstd: 17} - - {cc: gcc-9, cxx: g++-9, cxxstd: 17} - - {cc: gcc-10, cxx: g++-10, cxxstd: 17} - - {cc: gcc-10, cxx: g++-10, cxxstd: 2a} - steps: - - uses: actions/checkout@v2 - - - name: Install GCC-10 - if: matrix.config.cxx == 'g++-10' - run: | - sudo apt update - sudo apt-get install -y g++-10 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. 
libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> $GITHUB_ENV - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using gcc : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=gcc cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=gcc cxxstd=${{matrix.config.cxxstd}} cxxflags="-O0" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=gcc cxxstd=${{matrix.config.cxxstd}} cxxflags="-O0 -g --coverage" linkflags="--coverage" - - - name: Report Code coverage - if: matrix.config.cxxstd == '17' && matrix.config.cxx == 'g++-9' - run: | - ${GITHUB_WORKSPACE}/.ci/report_coverage.sh - - curl -s https://codecov.io/bash > cov.sh - chmod +x cov.sh - ./cov.sh -f coverage.info || echo "Codecov did not collect coverage reports" - diff --git a/.github/workflows/sanitizer.yml b/.github/workflows/sanitizer.yml new file mode 100644 index 000000000..a2edff34b --- /dev/null +++ b/.github/workflows/sanitizer.yml @@ -0,0 +1,83 @@ +# Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy +# Distributed under Boost Software License, Version 1.0 +# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) + +name: "Clang Sanitizer" + +on: + push: + paths-ignore: + - '**.md' + - 'doc/**' + pull_request: + paths-ignore: + - '**.md' + - 'doc/**' +jobs: + build: + name: "${{matrix.config.name}} with ${{matrix.config.description}} sanitizer with std=${{matrix.config.cxx}} and variant=${{matrix.config.variant}}" + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + # If 
any compiler fails to compile, continue CI for next compiler in matrix instead of failing early + matrix: + config: + - { name: clang, description: address and leak, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off , cxxflags: -fsanitize=address -fno-omit-frame-pointer, ldflags: -fsanitize=address } + - { name: clang, description: undefined behavior, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off , cxxflags: -fsanitize=undefined, ldflags: -fsanitize=undefined } + - { name: clang, description: thread, cc: clang-11, cxx: clang++-11, cxxstd: 20, variant: debug, opt: off , cxxflags: -fsanitize=thread, ldflags: -fsanitize=thread } + - { name: gcc, description: address and leak, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp -fsanitize=address -fno-omit-frame-pointer, ldflags: -fsanitize=address -fopenmp} +# - { name: gcc, description: undefined behavior, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp -fsanitize=undefined, ldflags: -fsanitize=undefined -fopenmp} +# - { name: gcc, description: thread, cc: gcc-10, cxx: g++-10, cxxstd: 20, variant: release, opt: speed, cxxflags: -fopenmp -fsanitize=thread, ldflags: -fsanitize=thread -fopenmp} + + steps: + - uses: actions/checkout@v2 + + - name: Install Clang 11 + run: sudo apt-get update && sudo apt-get install -y clang-11 clang-tools-11 + + - name: Install GCC 10 + run: sudo apt-get update && sudo apt-get install -y g++-10 libomp-dev + + - name: Git Clone Boost.uBlas + run: | + cd ${GITHUB_WORKSPACE} + cd .. 
+ + git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root + cd boost-root + echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} + echo $BOOST_ROOT + + git submodule update --init --depth=1 --jobs 8 tools/build + git submodule update --init --depth=1 --jobs 8 libs/config + git submodule update --init --depth=1 --jobs 8 tools/boostdep + + mkdir -p libs/numeric/ + cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas + python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas + + - name: Bootstrap Boost and Compile Boost + run: | + cd $BOOST_ROOT + ./bootstrap.sh + ./b2 -j8 headers + echo "using ${{ matrix.config.name }} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; + export ASAN_OPTIONS=symbolize=1 + export ASAN_OPTIONS=detect_leaks=1 + + - name: Run Tensor Examples with "${{matrix.config.description}}" sanitizer + run: | + cd $BOOST_ROOT/libs/numeric/ublas + ASAN_OPTIONS=detect_leaks=1 + ASAN_OPTIONS=symbolize=1 + $BOOST_ROOT/b2 -j8 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + + - name: Run Tensor Tests with "${{matrix.config.description}}" sanitizer + run: | + cd $BOOST_ROOT/libs/numeric/ublas + ASAN_OPTIONS=detect_leaks=1 + ASAN_OPTIONS=symbolize=1 + $BOOST_ROOT/b2 -j8 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} variant=${{matrix.config.variant}} optimization=${{matrix.config.opt}} cxxflags="${{matrix.config.cxxflags}}" linkflags="${{matrix.config.ldflags}}" + + diff --git a/.github/workflows/thread_san.yml b/.github/workflows/thread_san.yml deleted file mode 100644 index 4fc416a0d..000000000 --- a/.github/workflows/thread_san.yml +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or 
copy at http://boost.org/LICENSE_1_0.txt) - -name: Thread Sanitizer -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. -on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-9, cxx: g++-9, cxxstd: 2a, name: gcc} - - {cc: clang-9, cxx: clang++-9, cxxstd: 2a, name: clang} - steps: - - uses: actions/checkout@v2 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. 
libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using ${{matrix.config.name}} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=thread -O2" linkflags="-fsanitize=thread" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=thread -O2" linkflags="-fsanitize=thread" diff --git a/.github/workflows/ub_san.yml b/.github/workflows/ub_san.yml deleted file mode 100644 index 1985bf458..000000000 --- a/.github/workflows/ub_san.yml +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2020 Mohammad Ashar Khan -# Distributed under Boost Software License, Version 1.0 -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - -name: Undefined Behaviour Sanitizer -# Trigger on Push to the repository, regardless of the branch. -# For fine tune, You can add specific branches or tags. 
-on: - push: - paths-ignore: - - '**.md' - - 'doc/**' - pull_request: - paths-ignore: - - '**.md' - - 'doc/**' -jobs: - build: - name: "${{matrix.config.cxx}} -std=c++${{matrix.config.cxxstd}}" - runs-on: ubuntu-20.04 - strategy: - fail-fast: false - # If any compiler fails to compile, continue CI for next compiler in matrix instead of failing early - matrix: - config: - - {cc: gcc-9, cxx: g++-9, cxxstd: 2a, name: gcc} - - {cc: clang-9, cxx: clang++-9, cxxstd: 2a, name: clang} - steps: - - uses: actions/checkout@v2 - - - name: Prepare BOOST_ROOT - run: | - cd ${GITHUB_WORKSPACE} - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - cd boost-root - - git submodule update --init --depth=1 --jobs 8 tools/build - git submodule update --init --depth=1 --jobs 8 libs/config - git submodule update --init --depth=1 --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - cp -rp ${GITHUB_WORKSPACE}/. libs/numeric/ublas - python tools/boostdep/depinst/depinst.py -g " --depth=1" -I benchmarks numeric/ublas - - ./bootstrap.sh - ./b2 -j 8 headers - - #echo ::set-env name=BOOST_ROOT::${PWD} - echo "BOOST_ROOT=${PWD}" >> ${GITHUB_ENV} - - - name: Prepare Build - run: | - echo $BOOST_ROOT - cd $BOOST_ROOT - - echo "using ${{matrix.config.name}} : : ${{ matrix.config.cxx }} ;" >> ~/user-config.jam; - - - name: Test Benchmarks - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 benchmarks toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} - - - name: Test Tensor Examples - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 examples/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=undefined" linkflags="-fsanitize=undefined" - - - name: Test Tensor - run: | - cd $BOOST_ROOT - cd libs/numeric/ublas - $BOOST_ROOT/b2 -j 4 test/tensor toolset=${{matrix.config.name}} cxxstd=${{matrix.config.cxxstd}} cxxflags="-g -fsanitize=undefined" 
linkflags="-fsanitize=undefined" diff --git a/.github/workflows/windows_msvc.yml b/.github/workflows/windows.yml similarity index 77% rename from .github/workflows/windows_msvc.yml rename to .github/workflows/windows.yml index 59e6ff05d..7bdfc69be 100644 --- a/.github/workflows/windows_msvc.yml +++ b/.github/workflows/windows.yml @@ -1,8 +1,9 @@ # Copyright (c) 2020 Mohammad Ashar Khan +# Copyright (c) 2021 Cem Bassoy # Distributed under Boost Software License, Version 1.0 # (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) -name: "Windows MSVC" +name: "Windows" on: push: paths-ignore: @@ -14,15 +15,15 @@ on: - 'doc/**' jobs: build: - name: "windows=${{matrix.config.os}} msvc=${{matrix.config.version}} std=c++${{matrix.config.cxxstd}}" + name: "Windows=${{matrix.config.os}} msvc=${{matrix.config.version}} std=c++${{matrix.config.cxxstd}}" runs-on: ${{matrix.config.os}} strategy: - fail-fast: false + fail-fast: true matrix: config: - - {os: windows-2016, toolset: msvc, version: 14.16, cxxstd: 11} - - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 11} - - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 17} +# - {os: windows-2016, toolset: msvc, version: 14.16, cxxstd: 11} +# - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 11} +# - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 17} - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: latest} steps: @@ -70,24 +71,24 @@ jobs: cmd /c bootstrap b2 -j8 headers - - name: Test Benchmarks - shell: cmd - run: | - cd %BOOST_ROOT% - cd libs\numeric\ublas - %BOOST_ROOT%\b2 -j 4 benchmarks toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 +# - name: Test Benchmarks +# shell: cmd +# run: | +# cd %BOOST_ROOT% +# cd libs\numeric\ublas +# %BOOST_ROOT%\b2 -j 4 benchmarks toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 - name: Test Tensor Examples shell: cmd run: | cd %BOOST_ROOT% cd libs\numeric\ublas - 
%BOOST_ROOT%\b2 -j 4 examples/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 + %BOOST_ROOT%\b2 -j8 examples/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 - name: Test Tensor shell: cmd run: | cd %BOOST_ROOT% cd libs\numeric\ublas - %BOOST_ROOT%\b2 -j 4 test/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 + %BOOST_ROOT%\b2 -j8 test/tensor toolset=%TOOLSET% cxxstd=${{matrix.config.cxxstd}} address-model=64 diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index f4578d1b4..000000000 --- a/.travis.yml +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2018 Stefan Seefeld -# Distributed under the Boost Software License, Version 1.0. -# (See accompanying file LICENSE_1_0.txt or copy at http://boost.org/LICENSE_1_0.txt) - - -language: cpp - -dist: bionic - -sudo: required - -branches: - only: - - master - - develop - - doc - - ci - -# env: specifies additional global variables to define per row in build matrix -env: - global: - - CLBLAS_PREFIX=${TRAVIS_BUILD_DIR}/CLBLAS/ - - PATH=${CLBLAS_PREFIX}/bin:$PATH - - LD_LIBRARY_PATH=${CLBLAS_PREFIX}/lib:$LD_LIBRARY_PATH - -matrix: - include: - - os: linux - env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=11 - - os: linux - env: TOOLSET=gcc COMPILER=g++-7 CXXSTD=17 - - os: linux - env: TOOLSET=gcc COMPILER=g++-9 CXXSTD=2a - - os: linux - env: TOOLSET=clang COMPILER=clang++-7 CXXSTD=17 - - os: linux - env: TOOLSET=clang COMPILER=clang++-10 CXXSTD=2a - -addons: - apt: - sources: - - sourceline: 'deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-10 main' - key_url: 'https://apt.llvm.org/llvm-snapshot.gpg.key' - - sourceline: 'ppa:ubuntu-toolchain-r/test' - packages: - - g++-7 - - g++-9 - - clang-7 - - clang-10 - - libopenblas-base - - rpm2cpio - - cpio - - clinfo - - opencl-headers - - ocl-icd-opencl-dev - -before_install: - - if [ ${TRAVIS_OS_NAME} == "linux" ]; then .ci/install-ocl-ubuntu.sh; fi - - .ci/install-clblas.sh - - cmake --version; - - 
${CC} --version; - - ${CXX} --version; - -install: - - cd .. - - git clone -b master --depth 1 https://github.com/boostorg/boost.git boost-root - - cd boost-root - - git submodule update --init --jobs 8 tools/build - - git submodule update --init --jobs 8 libs/config - - git submodule update --init --jobs 8 tools/boostdep - - mkdir -p libs/numeric/ - - cp -rp $TRAVIS_BUILD_DIR/. libs/numeric/ublas - - python tools/boostdep/depinst/depinst.py -I benchmarks numeric/ublas - - ./bootstrap.sh - - ./b2 -j 8 headers - - export BOOST_ROOT="`pwd`" - -# use script: to execute build steps -script: - - |- - echo "using $TOOLSET : : $COMPILER ;" >> ~/user-config.jam; - echo "using clblas : : ${CLBLAS_PREFIX}/include ${CLBLAS_PREFIX}/lib ;" >> ~/user-config.jam; - cp $TRAVIS_BUILD_DIR/opencl.jam ~/ - cp $TRAVIS_BUILD_DIR/clblas.jam ~/ - - cd libs/numeric/ublas - - $BOOST_ROOT/b2 -j 8 test toolset=$TOOLSET cxxstd=$CXXSTD - -notifications: - email: - on_success: always \ No newline at end of file diff --git a/IDEs/qtcreator/examples/configuration.pri b/IDEs/qtcreator/examples/configuration.pri index 0c0c8650f..dde3f8f8f 100644 --- a/IDEs/qtcreator/examples/configuration.pri +++ b/IDEs/qtcreator/examples/configuration.pri @@ -1,17 +1,24 @@ CONFIG -= qt CONFIG += depend_includepath win*: CONFIG += console +CONFIG += object_parallel_to_source -QMAKE_CXXFLAGS += -std=c++17 -fopenmp -g +QMAKE_CXXFLAGS =-std=c++20 +QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra +QMAKE_CXXFLAGS +=-Wno-unknown-pragmas +QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable -# If ublas tests are build with boost source code then, -# then boost headers and boost libraries should be used. +gcc:QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp +clang: QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp=libiomp5 -BOOST_ROOT=../../../../../.. 
+gcc:QMAKE_CXXFLAGS_DEBUG += -g +clang: QMAKE_CXXFLAGS_DEBUG +=-g -exists( $$BOOST_ROOT/boost-build.jam ) { - message("Boost installed.") - INCLUDEPATH += $${BOOST_ROOT}/../libs/numeric/ublas/include - LIBS += -L$${BOOST_ROOT}/../stage/lib -lgomp - QMAKE_RPATHDIR += $${BOOST_ROOT}/../stage/lib -} +BOOST_ROOT=../../../../../../../.. +QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib +INCLUDEPATH+=$$BOOST_ROOT/libs/numeric/ublas/include +LIBS+=-L$${BOOST_ROOT}/stage/lib + +#message("INCLUDEPATH: $${INCLUDEPATH}") + +INCLUDE_DIR=$${BOOST_ROOT}/libs/numeric/ublas/include diff --git a/IDEs/qtcreator/examples/tensor/example_access_tensor.pro b/IDEs/qtcreator/examples/tensor/example_access_tensor.pro deleted file mode 100644 index c6d761159..000000000 --- a/IDEs/qtcreator/examples/tensor/example_access_tensor.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = access_tensor - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/access_tensor.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro b/IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro deleted file mode 100644 index e98706b38..000000000 --- a/IDEs/qtcreator/examples/tensor/example_instantiate_tensor.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = instantiate_tensor - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/instantiate_tensor.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro b/IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro deleted file mode 100644 index 1aca61e34..000000000 --- a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_einstein_notation.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = multiply_tensors_einstein_notation - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/multiply_tensors_einstein_notation.cpp diff --git 
a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro b/IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro deleted file mode 100644 index bf02e0228..000000000 --- a/IDEs/qtcreator/examples/tensor/example_multiply_tensors_product_function.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = multiply_tensors_product_function - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/multiply_tensors_product_function.cpp diff --git a/IDEs/qtcreator/examples/tensor/example_simple_expressions.pro b/IDEs/qtcreator/examples/tensor/example_simple_expressions.pro deleted file mode 100644 index 1c65b9cc6..000000000 --- a/IDEs/qtcreator/examples/tensor/example_simple_expressions.pro +++ /dev/null @@ -1,7 +0,0 @@ -TEMPLATE = app -TARGET = simple_expressions - -include (../configuration.pri) - -SOURCES += \ - ../../../../examples/tensor/simple_expressions.cpp diff --git a/IDEs/qtcreator/examples/tensor/tensor.pro b/IDEs/qtcreator/examples/tensor/tensor.pro index feb833f62..f928b32bf 100644 --- a/IDEs/qtcreator/examples/tensor/tensor.pro +++ b/IDEs/qtcreator/examples/tensor/tensor.pro @@ -1,21 +1,10 @@ -#TEMPLATE = subdirs -#SUBDIRS = \ -# construction_access \ -# simple_expressions \ -# multiply_tensors_prod \ -# einstein_notation \ -# instantiate_tensor +TEMPLATE = subdirs +SUBDIRS = \ + simple_expressions \ + multiply_tensors_product_function \ + multiply_tensors_einstein_notation \ + instantiate_tensor \ + access_tensor -include ( example_instantiate_tensor.pro ) -include ( example_access_tensor.pro ) -include ( example_simple_expressions.pro ) -include ( example_multiply_tensors_product_function.pro ) -include ( example_multiply_tensors_einstein_notation.pro ) - -#instantiate_tensor.file = example_instantiate_tensor.pro -#construction_access.file = example_construction_access.pro -#simple_expressions.file = example_simple_expressions.pro -#multiply_tensors_prod.file = 
example_multiply_tensors_prod.pro -#einstein_notation.file = example_einstein_notation.pro diff --git a/IDEs/qtcreator/include/include.pro b/IDEs/qtcreator/include/include.pro index 17d199106..a5aeead8b 100644 --- a/IDEs/qtcreator/include/include.pro +++ b/IDEs/qtcreator/include/include.pro @@ -1,11 +1,9 @@ TEMPLATE = lib TARGET = ublas -CONFIG += \ - staticlib \ - depend_includepath +CONFIG += staticlib depend_includepath CONFIG -= qt -CONFIG += c++17 +CONFIG += c++20 INCLUDE_DIR=../../../include include(detail/detail.pri) diff --git a/IDEs/qtcreator/include/tensor/tensor.pri b/IDEs/qtcreator/include/tensor/tensor.pri index 4cfae5d9f..112376c11 100644 --- a/IDEs/qtcreator/include/tensor/tensor.pri +++ b/IDEs/qtcreator/include/tensor/tensor.pri @@ -1,14 +1,11 @@ HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/algorithms.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/dynamic_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/dynamic_strides.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression_evaluation.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents_functions.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/fixed_rank_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/fixed_rank_strides.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/functions.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/index.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/index_functions.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/layout.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multi_index.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/multi_index_utility.hpp \ @@ -16,24 +13,43 @@ HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_arithmetic.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_comparison.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/ostream.hpp \ - 
$${INCLUDE_DIR}/boost/numeric/ublas/tensor/static_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/static_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/strides.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tags.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor_core.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor_engine.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/concepts.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/type_traits.hpp + +HEADERS += \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/traits/storage_traits.hpp + +HEADERS += \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static_size.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_static.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_base.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/extents/extents_functions.hpp + + +HEADERS += \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_core.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp + + HEADERS += \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/basic_type_traits.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/storage_traits.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_dynamic_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_dynamic_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_extents.hpp \ - 
$${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_fixed_rank_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_fixed_rank_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_static_extents.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_static_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_strides.hpp \ - $${INCLUDE_DIR}/boost/numeric/ublas/tensor/detail/type_traits_tensor.hpp + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/inner_prod.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/init.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/outer_prod.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/trans.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/norm.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/imag.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/real.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/conj.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/reshape.hpp diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 5966e27d6..8deee2f99 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -1,38 +1,46 @@ TEMPLATE = app -TARGET = test +TARGET = test_tensor +CONFIG += staticlib depend_includepath console CONFIG -= qt -CONFIG += depend_includepath debug -win*: CONFIG += console +CONFIG += c++20 #QMAKE_CXXFLAGS += -fno-inline -QMAKE_CXXFLAGS += -std=c++17 -#QMAKE_CXXFLAGS += -Wno-unknown-pragmas -#QMAKE_CXXFLAGS += --coverage +QMAKE_CXXFLAGS =-std=c++20 +QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra +QMAKE_CXXFLAGS +=-Wno-unknown-pragmas 
+QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable + + +gcc:QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp +clang: QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp=libiomp5 +gcc:QMAKE_CXXFLAGS_DEBUG = -g +clang: QMAKE_CXXFLAGS_DEBUG =-g -DEFINES += BOOST_UBLAS_NO_EXCEPTIONS -win*: DEFINES += _SCL_SECURE_NO_WARNINGS -#Visual age IBM -xlc: DEFINES += BOOST_UBLAS_NO_ELEMENT_PROXIES +#QMAKE_CXXFLAGS += --coverage + +BOOST_ROOT=../../../../../.. -# If ublas tests are build with boost source code then, -# then boost headers and boost libraries should be used. -#exists(../../../../../../boost-build.jam) { -# INCLUDEPATH += ../../../../../.. -# LIBS += -L../../../../../../stage/lib -# QMAKE_RPATHDIR += ../../../../../../stage/lib +#exists( $$BOOST_ROOT/boost-build.jam ) { +# message("Boost installed.") +# INCLUDEPATH += $${BOOST_ROOT}/libs/numeric/ublas/include +# LIBS += -L$${BOOST_ROOT}/stage/lib -lgomp +# QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib #} -INCLUDEPATH += /usr/local/include -INCLUDEPATH += ../../../include -LIBS += -L/usr/local/lib -LIBS +=-lboost_unit_test_framework -# -lgcov +QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib +INCLUDEPATH+=$$BOOST_ROOT/libs/numeric/ublas/include +LIBS+=-L$${BOOST_ROOT}/stage/lib -lboost_unit_test_framework -lgomp +#message("INCLUDEPATH: $${INCLUDEPATH}") + +INCLUDE_DIR=$${BOOST_ROOT}/libs/numeric/ublas/include TEST_DIR = ../../../test/tensor +include(../include/tensor/tensor.pri) + HEADERS += \ $${TEST_DIR}/utility.hpp @@ -41,7 +49,8 @@ SOURCES += \ $${TEST_DIR}/test_einstein_notation.cpp \ $${TEST_DIR}/test_expression.cpp \ $${TEST_DIR}/test_expression_evaluation.cpp \ - $${TEST_DIR}/test_extents.cpp \ + $${TEST_DIR}/test_extents_dynamic.cpp \ + $${TEST_DIR}/test_extents_dynamic_rank_static.cpp \ $${TEST_DIR}/test_fixed_rank_expression_evaluation.cpp \ $${TEST_DIR}/test_fixed_rank_extents.cpp \ $${TEST_DIR}/test_fixed_rank_functions.cpp \ @@ -66,8 +75,5 @@ SOURCES += \ $${TEST_DIR}/test_static_tensor_matrix_vector.cpp \ 
$${TEST_DIR}/test_strides.cpp \ $${TEST_DIR}/test_tensor.cpp \ - $${TEST_DIR}/test_tensor_matrix_vector.cpp - - -INCLUDEPATH += \ - ../../../include + $${TEST_DIR}/test_tensor_matrix_vector.cpp \ + $${TEST_DIR}/test_extents_functions.cpp diff --git a/IDEs/qtcreator/tests.pri b/IDEs/qtcreator/tests.pri index 7b55d478c..04e131f59 100644 --- a/IDEs/qtcreator/tests.pri +++ b/IDEs/qtcreator/tests.pri @@ -1,72 +1,72 @@ SUBDIRS += \ - begin_end \ - comp_mat_erase \ - concepts \ - num_columns \ - num_rows \ - placement_new \ - size \ - sparse_view_test \ - test1 \ - test2 \ - test3 \ - test3_coo \ - test3_mvov \ - test4 \ - test5 \ - test6 \ - test7 \ - test_assignment \ - test_banded_storage_layout \ - test_complex_norms \ - test_coordinate_matrix_inplace_merge \ - test_coordinate_matrix_sort \ - test_coordinate_matrix_always_do_full_sort \ - test_coordinate_vector_inplace_merge \ - test_fixed_containers \ - test_inplace_solve_basic \ - test_inplace_solve_sparse \ - test_inplace_solve_mvov \ - test_lu \ - test_matrix_vector \ - test_ticket7296 \ - test_triangular \ - triangular_access \ - triangular_layout \ - test_tensor +# begin_end \ +# comp_mat_erase \ +# concepts \ +# num_columns \ +# num_rows \ +# placement_new \ +# size \ +# sparse_view_test \ +# test1 \ +# test2 \ +# test3 \ +# test3_coo \ +# test3_mvov \ +# test4 \ +# test5 \ +# test6 \ +# test7 \ +# test_assignment \ +# test_banded_storage_layout \ +# test_complex_norms \ +# test_coordinate_matrix_inplace_merge \ +# test_coordinate_matrix_sort \ +# test_coordinate_matrix_always_do_full_sort \ +# test_coordinate_vector_inplace_merge \ +# test_fixed_containers \ +# test_inplace_solve_basic \ +# test_inplace_solve_sparse \ +# test_inplace_solve_mvov \ +# test_lu \ +# test_matrix_vector \ +# test_ticket7296 \ +# test_triangular \ +# triangular_access \ +# triangular_layout \ + # test_tensor -begin_end.file = test/begin_end.pro -comp_mat_erase.file = test/comp_mat_erase.pro -concepts.file = test/concepts.pro 
-num_columns.file = test/num_columns.pro -num_rows.file = test/num_rows.pro -placement_new.file = test/placement_new.pro -size.file = test/size.pro -sparse_view_test.file = test/sparse_view_test.pro -test1.file = test/test1.pro -test2.file = test/test2.pro -test3.file = test/test3.pro -test3_coo.file = test/test3_coo.pro -test3_mvov.file = test/test3_mvov.pro -test4.file = test/test4.pro -test5.file = test/test5.pro -test6.file = test/test6.pro -test7.file = test/test7.pro -test_assignment.file = test/test_assignment.pro -test_banded_storage_layout.file = test/test_banded_storage_layout.pro -test_complex_norms.file = test/test_complex_norms.pro -test_coordinate_matrix_inplace_merge.file = test/test_coordinate_matrix_inplace_merge.pro -test_coordinate_matrix_sort.file = test/test_coordinate_matrix_sort.pro -test_coordinate_matrix_always_do_full_sort.file = test/test_coordinate_matrix_always_do_full_sort.pro -test_coordinate_vector_inplace_merge.file = test/test_coordinate_vector_inplace_merge.pro -test_fixed_containers.file = test/test_fixed_containers.pro -test_inplace_solve_basic.file = test/test_inplace_solve_basic.pro -test_inplace_solve_sparse.file = test/test_inplace_solve_sparse.pro -test_inplace_solve_mvov.file = test/test_inplace_solve_mvov.pro -test_lu.file = test/test_lu.pro -test_matrix_vector.file = test/test_matrix_vector.pro -test_ticket7296.file = test/test_ticket7296.pro -test_triangular.file = test/test_triangular.pro -triangular_access.file = test/triangular_access.pro -triangular_layout.file = test/triangular_layout.pro -test_tensor.file = test/test_tensor.pro +#begin_end.file = test/begin_end.pro +#comp_mat_erase.file = test/comp_mat_erase.pro +#concepts.file = test/concepts.pro +#num_columns.file = test/num_columns.pro +#num_rows.file = test/num_rows.pro +#placement_new.file = test/placement_new.pro +#size.file = test/size.pro +#sparse_view_test.file = test/sparse_view_test.pro +#test1.file = test/test1.pro +#test2.file = test/test2.pro 
+#test3.file = test/test3.pro +#test3_coo.file = test/test3_coo.pro +#test3_mvov.file = test/test3_mvov.pro +#test4.file = test/test4.pro +#test5.file = test/test5.pro +#test6.file = test/test6.pro +#test7.file = test/test7.pro +#test_assignment.file = test/test_assignment.pro +#test_banded_storage_layout.file = test/test_banded_storage_layout.pro +#test_complex_norms.file = test/test_complex_norms.pro +#test_coordinate_matrix_inplace_merge.file = test/test_coordinate_matrix_inplace_merge.pro +#test_coordinate_matrix_sort.file = test/test_coordinate_matrix_sort.pro +#test_coordinate_matrix_always_do_full_sort.file = test/test_coordinate_matrix_always_do_full_sort.pro +#test_coordinate_vector_inplace_merge.file = test/test_coordinate_vector_inplace_merge.pro +#test_fixed_containers.file = test/test_fixed_containers.pro +#test_inplace_solve_basic.file = test/test_inplace_solve_basic.pro +#test_inplace_solve_sparse.file = test/test_inplace_solve_sparse.pro +#test_inplace_solve_mvov.file = test/test_inplace_solve_mvov.pro +#test_lu.file = test/test_lu.pro +#test_matrix_vector.file = test/test_matrix_vector.pro +#test_ticket7296.file = test/test_ticket7296.pro +#test_triangular.file = test/test_triangular.pro +#triangular_access.file = test/triangular_access.pro +#triangular_layout.file = test/triangular_layout.pro +#test_tensor.file = test/test_tensor.pro diff --git a/IDEs/qtcreator/ublas_develop.pro b/IDEs/qtcreator/ublas_develop.pro index e509e747e..49fc2d99c 100644 --- a/IDEs/qtcreator/ublas_develop.pro +++ b/IDEs/qtcreator/ublas_develop.pro @@ -4,7 +4,9 @@ SUBDIRS = include # examples # benchmarks OTHER_FILES += ../../changelog.txt -include (tests.pri) +#include (tests.pri) + + diff --git a/README.md b/README.md index 22ae6db18..1e9ec9223 100644 --- a/README.md +++ b/README.md @@ -8,64 +8,53 @@ Boost Linear and Multilinear Algebra Library [![Mailing 
List](https://img.shields.io/badge/ublas-mailing%20list-4eb899.svg)](https://lists.boost.org/mailman/listinfo.cgi/ublas) [![Gitter](https://img.shields.io/badge/ublas-chat%20on%20gitter-4eb899.svg)](https://gitter.im/boostorg/ublas) -**Boost.uBLAS** is part of the [Boost C++ Libraries](http://github.com/boostorg). It is directed towards scientific computing on the level of basic linear and multilinear algebra operations with tensors, matrices and vectors. +[![Windows](https://github.com/boostorg/ublas/actions/workflows/windows.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/windows.yml) +[![Linux](https://github.com/boostorg/ublas/actions/workflows/linux.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/linux.yml) +[![Apple MacOS](https://github.com/boostorg/ublas/actions/workflows/apple.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/apple.yml) +[![Clang Sanitizer](https://github.com/boostorg/ublas/actions/workflows/sanitizer.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/sanitizer.yml) +[![Clang Tidy](https://github.com/boostorg/ublas/actions/workflows/clangtidy.yml/badge.svg)](https://github.com/boostorg/ublas/actions/workflows/clangtidy.yml) +[![Codecov](https://codecov.io/gh/boostorg/ublas/branch/master/graph/badge.svg)](https://codecov.io/gh/boostorg/ublas/branch/master) +**Boost.uBLAS** is part of the [Boost C++ Libraries](http://github.com/boostorg). +It is directed towards scientific computing on the level of basic linear and multilinear algebra operations with tensors, matrices and vectors. + ## Documentation uBLAS is documented at [boost.org](https://www.boost.org/doc/libs/1_69_0/libs/numeric/ublas/doc/index.html). -The tensor extension has also a [wiki page](https://github.com/BoostGSoC18/tensor/wiki). +The tensor extension has also a [wiki page](https://github.com/boostorg/ublas/wiki/Tensor). 
## License Distributed under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt). ## Properties -* Header-only -* Tensor extension requires C++17 compatible compiler +* header-only +* requires C++20 compatible compiler + * gcc version >= 10.x.x + * clang version >= 10.x.x + * msvc version >= 14.28 * Unit-tests require Boost.Test -## Build Status - - -#### Tensor Build & Test - -| Operating System | Compiler | [`master`](https://github.com/boostorg/ublas/tree/master) | [`develop`](https://github.com/boostorg/ublas/tree/develop) | -| :-------------------------: | :-----------------: | :----------------------------------------------------------: | :----------------------------------------------------------: | -| Linux (Ubuntu 20.04 x86_64) | gcc-{7, 8, 9, 10} | [![Linux GCC Debug](https://github.com/boostorg/ublas/workflows/Linux%20GCC%20Debug/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+GCC%22+branch%3Amaster) | [![Linux GCC Debug](https://github.com/boostorg/ublas/workflows/Linux%20GCC%20Debug/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+GCC%22+branch%3Adevelop) | -| Linux (Ubuntu 20.04 x86_64) | clang-{6, 8, 9, 10} | [![Linux Clang Release](https://github.com/boostorg/ublas/workflows/Linux%20Clang%20Release/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+Clang%22+branch%3Amaster) | [![Linux Clang Release](https://github.com/boostorg/ublas/workflows/Linux%20Clang%20Release/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Linux+Clang%22+branch%3Adevelop) | -| Windows 10 (x86_64) | msvc-{14.16, 14.26} | [![Windows MSVC](https://github.com/boostorg/ublas/workflows/Windows%20MSVC/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Windows+MSVC%22+branch%3Amaster) | [![Windows 
MSVC](https://github.com/boostorg/ublas/workflows/Windows%20MSVC/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Windows+MSVC%22+branch%3Adevelop) | -| MacOS Catalina (x86_64) | clang-11 | [![Apple Clang](https://github.com/boostorg/ublas/workflows/Apple%20Clang/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Apple+Clang%22+branch%3Amaster) | [![Apple Clang](https://github.com/boostorg/ublas/workflows/Apple%20Clang/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Apple+Clang%22+branch%3Adevelop) | - -#### Tensor Additional Checks - -| Checks | [`master`](https://github.com/boostorg/ublas/tree/master) | [`develop`](https://github.com/boostorg/ublas/tree/develop) | -| :-----------: | :----------------------------------------------------------: | :----------------------------------------------------------: | -| UB Sanitizer | [![Undefined Behaviour Sanitizer](https://github.com/boostorg/ublas/workflows/Undefined%20Behaviour%20Sanitizer/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Undefined+Behaviour+Sanitizer%22+branch%3Amaster) | [![Undefined Behaviour Sanitizer](https://github.com/boostorg/ublas/workflows/Undefined%20Behaviour%20Sanitizer/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Undefined+Behaviour+Sanitizer%22+branch%3Adevelop) | -| TH Sanitizer | [![Thread Sanitizer](https://github.com/boostorg/ublas/workflows/Thread%20Sanitizer/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Thread+Sanitizer%22+branch%3Amaster) | [![Thread Sanitizer](https://github.com/boostorg/ublas/workflows/Thread%20Sanitizer/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Thread+Sanitizer%22+branch%3Adevelop) | -| ADD Sanitizer | [![Address 
Sanitizer](https://github.com/boostorg/ublas/workflows/Address%20Sanitizer/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Address+Sanitizer%22+branch%3Amaster) | [![Address Sanitizer](https://github.com/boostorg/ublas/workflows/Address%20Sanitizer/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Address+Sanitizer%22+branch%3Adevelop) | -| Codecov | [![codecov](https://codecov.io/gh/boostorg/ublas/branch/master/graph/badge.svg)](https://codecov.io/gh/boostorg/ublas/branch/master) | [![codecov](https://codecov.io/gh/boostorg/ublas/branch/develop/graph/badge.svg)](https://codecov.io/gh/boostorg/ublas/branch/develop) | -| Clang-Format | [![Code Format](https://github.com/boostorg/ublas/workflows/Code%20Format/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Code+Format%22+branch%3Amaster) | [![Code Format](https://github.com/boostorg/ublas/workflows/Code%20Format/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Code+Format%22+branch%3Adevelop) | -| Clang-Tidy | [![Clang tidy checks](https://github.com/boostorg/ublas/workflows/Clang%20tidy%20checks/badge.svg?branch=master)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Clang+tidy+checks%22+branch%3Amaster) | [![Clang tidy checks](https://github.com/boostorg/ublas/workflows/Clang%20tidy%20checks/badge.svg?branch=develop)](https://github.com/boostorg/ublas/actions?query=workflow%3A%22Clang+tidy+checks%22+branch%3Adevelop) | +## Simple Example -#### Tensor Build Information +```cpp +#include +#include -| OS | Toolchain | Compiler Flags | -| :----------------: | :-------------: | :-----------------------------------------------: | -| Linux Ubuntu 20.04 | GCC | `-O0` | -| Linux Ubuntu 20.04 | Clang | `-O3` | -| Windows 10 | MSVC | No Special Flags | -| MacOS Catalina | Clang | No Special Flags | -| UB Sanitizer | GCC and Clang | `"-g 
-fsanitize=undefined"` | -| TH Sanitizer | GCC and Clang | `"-g -fsanitize=thread -O2"` | -| ADD Sanitizer | GCC and Clang | `"-g -fsanitize=address -fno-omit-frame-pointer"` | -| Clang Tidy | Clang-Tidy-10 | [Configuration File](.clang-tidy) | -| Clang Format | Clang-Format-10 | [Configuration File](.clang-format) | +int main() +{ + using namespace boost::numeric::ublas::index; + using tensor = boost::numeric::ublas::tensor_dynamic; + auto ones = boost::numeric::ublas::ones{}; -#### uBLAS CI + tensor A = ones(3,4,5); + tensor B = ones(4,6,3,2); -Branch | Travis | Appveyor | Regression | Docs -:-----: | ------ | --------- | ----------- | ----- - [`master`](https://github.com/boostorg/ublas/tree/master) | [![Build Status](https://travis-ci.org/boostorg/ublas.svg?branch=master)](https://travis-ci.org/boostorg/ublas) | [![Build status](https://ci.appveyor.com/api/projects/status/ctu3wnfowa627ful/branch/master?svg=true)](https://ci.appveyor.com/project/stefanseefeld/ublas/branch/master) | [![ublas](https://img.shields.io/badge/ublas-master-blue.svg)](https://www.boost.org/development/tests/master/developer/numeric-ublas.html) | [![Documentation](https://img.shields.io/badge/docs-develop-brightgreen.svg)](http://www.boost.org/doc/libs/release/libs/numeric) - [`develop`](https://github.com/boostorg/ublas/tree/develop) | [![Build Status](https://travis-ci.org/boostorg/ublas.svg?branch=develop)](https://travis-ci.org/boostorg/ublas) | [![Build status](https://ci.appveyor.com/api/projects/status/ctu3wnfowa627ful/branch/develop?svg=true)](https://ci.appveyor.com/project/stefanseefeld/ublas/branch/develop) | [![ublas](https://img.shields.io/badge/ublas-develop-blue.svg)](https://www.boost.org/development/tests/develop/developer/numeric-ublas.html) | [![Documentation](https://img.shields.io/badge/docs-develop-brightgreen.svg)](http://www.boost.org/doc/libs/release/libs/numeric) + tensor C = 2*ones(5,6,2) + A(_i,_j,_k)*B(_j,_l,_i,_m) + 5; + + // Matlab Compatible Formatted 
Output + std::cout << "C=" << C << ";" << std::endl; +} +``` ## Directories @@ -79,7 +68,7 @@ Branch | Travis | Appveyor | Regression | Docs ## More information -* If you would like to test the library, contribute new feature or a bug fix, see [contribution](https://github.com/boostorg/ublas/wiki/Guidelines-for-Contribution) where the whole development infrastructure and the contributing workflow is explained in details. +* If you would like to test the library, contribute new feature or a bug fix, see [contribution](https://github.com/boostorg/ublas/wiki/Guidelines-for-Contribution). * Ask questions in [stackoverflow](http://stackoverflow.com/questions/ask?tags=c%2B%2B,boost,boost-ublas) with `boost-ublas` or `ublas` tags. * Report [bugs](https://github.com/boostorg/ublas/issues) and be sure to mention Boost version, platform and compiler you're using. A small compilable code sample to reproduce the problem is always good as well. * Submit your patches as pull requests against **develop** branch. Note that by submitting patches you agree to license your modifications under the [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt). 
diff --git a/examples/tensor/.clang-tidy b/examples/tensor/.clang-tidy deleted file mode 100644 index 50e8131ae..000000000 --- a/examples/tensor/.clang-tidy +++ /dev/null @@ -1,11 +0,0 @@ ---- -Checks: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements,-cppcoreguidelines-avoid-magic-numbers,-readability-magic-numbers,-bugprone-exception-escape' -WarningsAsErrors: '-*,modernize-*,cppcoreguidelines-*,openmp-*,bugprone-*,performance-*,portability-*,readability-*,-modernize-use-trailing-return-type,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-readability-uppercase-literal-suffix,-readability-braces-around-statements,-cppcoreguidelines-avoid-magic-numbers,-readability-magic-numbers,-bugprone-exception-escape' -HeaderFilterRegex: 'boost\/numeric\/ublas\/tensor\/.*' -AnalyzeTemporaryDtors: false -FormatStyle: file -User: ublas-developers -CheckOptions: - - key: modernize-use-nullptr.NullMacros - value: 'NULL' -... diff --git a/examples/tensor/access_tensor.cpp b/examples/tensor/access_tensor.cpp index ebd7b2fc6..97e797fb8 100644 --- a/examples/tensor/access_tensor.cpp +++ b/examples/tensor/access_tensor.cpp @@ -15,89 +15,102 @@ #include +//NOLINTNEXTLINE int main() { - using namespace boost::numeric::ublas; - using namespace boost::multiprecision; - - { - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - - // creates a three-dimensional tensor with extents 3,4 and 2 - // tensor A stores single-precision floating-point number according - // to the first-order storage format - - auto A = tensor_t{3,4,2}; - - // initializes the tensor with increasing values along the first-index - // using a single index. 
- auto vf = 1.0f; - for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) - A[i] = vf; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "A=" << A << ";" << std::endl << std::endl; - } - - - { - using value_t = std::complex; - using format_t = boost::numeric::ublas::layout::last_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - - // creates a four-dimensional tensor with extents 5,4,3 and 2 - // tensor A stores complex floating-point extended double precision numbers - // according to the last-order storage format - // and initializes it with the default value. - - auto B = tensor_t(shape_t{5,4,3,2},value_t{}); - - // initializes the tensor with increasing values along the last-index - // using a single-index - auto vc = value_t(0,0); - for(auto i = 0u; i < B.size(); ++i, vc += value_t(1,1)) - B[i] = vc; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "B=" << B << ";" << std::endl << std::endl; - - - auto C = tensor_t(B.extents()); - // computes the complex conjugate of elements of B - // using multi-index notation. - for(auto i = 0u; i < B.size(0); ++i) - for(auto j = 0u; j < B.size(1); ++j) - for(auto k = 0u; k < B.size(2); ++k) - for(auto l = 0u; l < B.size(3); ++l) - C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); - - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "C=" << C << ";" << std::endl << std::endl; - - - - // computes the complex conjugate of elements of B - // using iterators. 
- auto D = tensor_t(B.extents()); - std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "D=" << D << ";" << std::endl << std::endl; - - // reshaping tensors. - auto new_extents = B.extents().base(); - std::next_permutation( new_extents.begin(), new_extents.end() ); - D.reshape( extents<>(new_extents) ); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "newD=" << D << ";" << std::endl << std::endl; - } + namespace ublas = boost::numeric::ublas; + + try { + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = ublas::tensor_dynamic; +// constexpr auto ones = ublas::ones{}; + constexpr auto zeros = ublas::zeros{}; + + // creates a three-dimensional tensor with extents 3,4 and 2 + // tensor A stores single-precision floating-point number according + // to the first-order storage format + + tensor A = zeros(3,4,2); + + // initializes the tensor with increasing values along the first-index + // using a single index. + auto vf = 1.0f; + for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) + A[i] = vf; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "A=" << A << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." 
<< std::endl; + } + + + try { + using value = std::complex; + using layout = ublas::layout::last_order; // storage format + using tensor = ublas::tensor_dynamic; + using shape = typename tensor::extents_type; + constexpr auto zeros = ublas::zeros{}; + + + // creates a four-dimensional tensor with extents 5,4,3 and 2 + // tensor A stores complex floating-point extended double precision numbers + // according to the last-order storage format + // and initializes it with the default value. + + //NOLINTNEXTLINE + tensor B = zeros(5,4,3,2); + + // initializes the tensor with increasing values along the last-index + // using a single-index + auto vc = value(0,0); + for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) + B[i] = vc; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "B=" << B << ";" << std::endl << std::endl; + + + auto C = tensor(B.extents()); + // computes the complex conjugate of elements of B + // using multi-index notation. + for(auto i = 0u; i < B.size(0); ++i) + for(auto j = 0u; j < B.size(1); ++j) + for(auto k = 0u; k < B.size(2); ++k) + for(auto l = 0u; l < B.size(3); ++l) + C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); + + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "C=" << C << ";" << std::endl << std::endl; + + + + // computes the complex conjugate of elements of B + // using iterators. + auto D = tensor(B.extents()); + std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "D=" << D << ";" << std::endl << std::endl; + + // reshaping tensors. 
+ auto new_extents = B.extents().base(); + std::next_permutation( new_extents.begin(), new_extents.end() ); + auto E = reshape( D, shape(new_extents) ); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "E=" << E << ";" << std::endl << std::endl; + + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." << std::endl; + } } diff --git a/examples/tensor/instantiate_tensor.cpp b/examples/tensor/instantiate_tensor.cpp index bb3d8a11e..851716c8d 100644 --- a/examples/tensor/instantiate_tensor.cpp +++ b/examples/tensor/instantiate_tensor.cpp @@ -15,79 +15,107 @@ #include #include -void instantiate_dynamic_tensor() +void instantiate_tensor_dynamic() { + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_dynamic; + constexpr auto ones = ublas::ones{}; - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - - // tensor type has dynamic order and dimensions + + try { + // tensor is resizable has dynamic dimensions // elements are stored contiguously in memory using the 1st-format (column-major) - auto t1 = tensor_t{3,4,2}; + tensor t1 = ones(3,4,2); std::cout << "t1 = " << t1 << std::endl; - auto t2 = tensor_t(shape_t{3,4,2},2.0F); + tensor t2 = 2 * ones(3,4,2); std::cout << "t2 = " << t2 << std::endl; - auto t3 = tensor_t(t2); + tensor t3 = 3*t2 + t1; std::cout << "t3 = " << t3 << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." 
<< std::endl; + throw; + } } -void instantiate_dynamic_tensors_with_static_order() +void instantiate_tensor_dynamics_with_static_order() { + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = boost::numeric::ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_static_rank; + constexpr auto ones = ublas::ones_static_rank{}; - constexpr auto order = 3U; - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::fixed_rank_tensor; - using shape_t = typename tensor_t::extents_type; + try { + // tensor type has static order and dynamic dimensions + // elements are stored contiguously in memory using the 1st-format (column-major) - // tensor type has static order and dynamic dimensions - // elements are stored contiguously in memory using the 1st-format (column-major) + auto t1 = ones(3,4,2); + std::cout << "t1 = " << t1 << std::endl; - auto t1 = tensor_t{3,4,2}; - std::cout << "t1 = " << t1 << std::endl; + tensor t2 = 2*ones(3,4,2); + std::cout << "t2 = " << t2 << std::endl; - auto t2 = tensor_t(shape_t{3,4,2},2.0F); - std::cout << "t2 = " << t2 << std::endl; + tensor t3 = 3*t2 + t1; + std::cout << "t3 = " << t3 << std::endl; - auto t3 = tensor_t(t2); - std::cout << "t3 = " << t3 << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." 
<< std::endl; + throw; + } } -void instantiate_static_tensor() +void instantiate_tensor_static() { - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using shape_t = boost::numeric::ublas::static_extents<3U,4U,2U>; - using tensor_t = boost::numeric::ublas::static_tensor; + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using shape = ublas::extents<3,4,2>; + using tensor = ublas::tensor_static; + constexpr auto ones = ublas::ones_static{}; - // tensor type has static order and static dimensions - // elements are stored contiguously in memory using the 1st-format (column-major) + try { + // tensor type has static order and static dimensions + // elements are stored contiguously in memory using the 1st-format (column-major) - auto t1 = tensor_t{}; - std::cout << "t1 = " << t1 << std::endl; + auto t1 = tensor{}; + std::cout << "t1 = " << t1 << std::endl; + + tensor t2 = 2 * ones(shape{}); + std::cout << "t2 = " << t2 << std::endl; - auto t2 = tensor_t(2.0F); - std::cout << "t2 = " << t2 << std::endl; + tensor t3 = 3*t2 + t1; + std::cout << "t3 = " << t3 << std::endl; - auto t3 = tensor_t(t2); - std::cout << "t3 = " << t3 << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." << std::endl; + throw; + } } int main() { - instantiate_dynamic_tensor(); - instantiate_dynamic_tensors_with_static_order(); - instantiate_static_tensor(); + try{ + instantiate_tensor_dynamic(); + instantiate_tensor_dynamics_with_static_order(); + instantiate_tensor_static(); + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of instantiate-tensor." 
<< std::endl; + } } diff --git a/examples/tensor/multiply_tensors_einstein_notation.cpp b/examples/tensor/multiply_tensors_einstein_notation.cpp index 3609feb95..c7ba3c2c6 100644 --- a/examples/tensor/multiply_tensors_einstein_notation.cpp +++ b/examples/tensor/multiply_tensors_einstein_notation.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -18,131 +18,150 @@ int main() { - using namespace boost::numeric::ublas; - - using value_t = float; - using format_t = boost::numeric::ublas::layout::first_order; // storage format - using tensor_t = boost::numeric::ublas::dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - - //using format_t = column_major; - //using value_t = float; - //using shape_t = dynamic_extents<>; - //using tensor_t = dynamic_tensor; - using matrix_t = matrix; - - - using namespace boost::numeric::ublas::index; - - // Tensor-Vector-Multiplications - Including Transposition - { - - auto n = shape_t{3,4,2}; - auto A = tensor_t(n,1); - auto B1 = matrix_t(n[1],n[2],2); - auto v1 = tensor_t(shape_t{n[0],1},2); - auto v2 = tensor_t(shape_t{n[1],1},2); -// auto v3 = tensor_t(shape{n[2],1},2); - - // C1(j,k) = B1(j,k) + A(i,j,k)*v1(i); - // tensor_t C1 = B1 + prod(A,vector_t(n[0],1),1); - tensor_t C1 = B1 + A(_i,_,_) * v1(_i,_); - - // C2(i,k) = A(i,j,k)*v2(j) + 4; - //tensor_t C2 = prod(A,vector_t(n[1],1),2) + 4; - tensor_t C2 = A(_,_i,_) * v2(_i,_) + 4; - - // not yet implemented! 
- // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); - // tensor_t C3 = prod(prod(prod(A,v1,1),v2,1),v3,1); - // tensor_t C3 = A(_i,_j,_k) * v1(_i,_) * v2(_j,_) * v3(_k,_); - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C1(j,k) = B1(j,k) + A(i,j,k)*v1(i);" << std::endl << std::endl; - std::cout << "C1=" << C1 << ";" << std::endl << std::endl; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C2(i,k) = A(i,j,k)*v2(j) + 4;" << std::endl << std::endl; - std::cout << "C2=" << C2 << ";" << std::endl << std::endl; - - } - - - // Tensor-Matrix-Multiplications - Including Transposition - { - auto n = shape_t{3,4,2}; - auto m = 5u; - auto A = tensor_t(n,2); - auto B = tensor_t(shape_t{n[1],n[2],m},2); - auto B1 = tensor_t(shape_t{m,n[0]},1); - auto B2 = tensor_t(shape_t{m,n[1]},1); - - - // C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i); - // tensor_t C1 = B + prod(A,B1,1); - tensor_t C1 = B + A(_i,_,_) * B1(_,_i); - - // C2(i,l,k) = A(i,j,k)*B2(l,j) + 4; - // tensor_t C2 = prod(A,B2) + 4; - tensor_t C2 = A(_,_j,_) * B2(_,_j) + 4; - - // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); - // not yet implemented. 
- - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i);" << std::endl << std::endl; - std::cout << "C1=" << C1 << ";" << std::endl << std::endl; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C2(i,l,k) = A(i,j,k)*B2(l,j) + 4;" << std::endl << std::endl; - std::cout << "C2=" << C2 << ";" << std::endl << std::endl; - - // formatted output -// std::cout << "% --------------------------- " << std::endl; -// std::cout << "% --------------------------- " << std::endl << std::endl; -// std::cout << "% C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k);" << std::endl << std::endl; -// std::cout << "C3=" << C3 << ";" << std::endl << std::endl; - } - - - // Tensor-Tensor-Multiplications Including Transposition - { - auto na = shape_t{3,4,5}; - auto nb = shape_t{4,6,3,2}; - auto A = tensor_t(na,2); - auto B = tensor_t(nb,3); - auto T1 = tensor_t(shape_t{na[2],na[2]},2); - auto T2 = tensor_t(shape_t{na[2],nb[1],nb[3]},2); - - - // C1(j,l) = T1(j,l) + A(i,j,k)*A(i,j,l) + 5; - // tensor_t C1 = T1 + prod(A,A,perm_t{1,2}) + 5; - tensor_t C1 = T1 + A(_i,_j,_m)*A(_i,_j,_l) + 5; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "% C1(k,l) = T1(k,l) + A(i,j,k)*A(i,j,l) + 5;" << std::endl << std::endl; - std::cout << "C1=" << C1 << ";" << std::endl << std::endl; - - - // C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; - //tensor_t C2 = T2 + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; - tensor_t C2 = T2 + A(_i,_j,_k)*B(_j,_l,_i,_m) + 5; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << 
std::endl; - std::cout << "% C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5;" << std::endl << std::endl; - std::cout << "C2=" << C2 << ";" << std::endl << std::endl; - - } + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = ublas::tensor_dynamic; + using shape = typename tensor::extents_type; + using matrix = ublas::matrix; + + constexpr auto ones = ublas::ones{}; + + // NOLINTNEXTLINE(google-build-using-namespace) + using namespace boost::numeric::ublas::index; + + using namespace boost::numeric::ublas::index; + using tensor = boost::numeric::ublas::tensor_dynamic; + auto fones = boost::numeric::ublas::ones{}; + + + tensor X = fones(3,4,5); + tensor Y = fones(4,6,3,2); + + tensor Z = 2*ones(5,6,2) + X(_i,_j,_k)*Y(_j,_l,_i,_m) + 5; + + // Matlab Compatible Formatted Output + std::cout << "C=" << Z << ";" << std::endl; + + + // Tensor-Vector-Multiplications - Including Transposition + try { + + auto n = shape{3,4,2}; + + tensor A = ones(n); + matrix B1 = 2*matrix(n[1],n[2]); + tensor v1 = 2*ones(n[0],1); + tensor v2 = 2*ones(n[1],1); + // auto v3 = tensor(shape{n[2],1},2); + + // C1(j,k) = B1(j,k) + A(i,j,k)*v1(i); + // tensor C1 = B1 + prod(A,vector_t(n[0],1),1); + tensor C1 = B1 + A(_i,_,_) * v1(_i,_); + + // C2(i,k) = A(i,j,k)*v2(j) + 4; + //tensor C2 = prod(A,vector_t(n[1],1),2) + 4; + tensor C2 = A(_,_i,_) * v2(_i,_) + 4; + + // not yet implemented! 
+ // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); + // tensor C3 = prod(prod(prod(A,v1,1),v2,1),v3,1); + // tensor C3 = A(_i,_j,_k) * v1(_i,_) * v2(_j,_) * v3(_k,_); + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C1(j,k) = B1(j,k) + A(i,j,k)*v1(i);" << std::endl << std::endl; + std::cout << "C1=" << C1 << ";" << std::endl << std::endl; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C2(i,k) = A(i,j,k)*v2(j) + 4;" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-einstein-notation when doing tensor-vector multiplication." << std::endl; + } + + // Tensor-Matrix-Multiplications - Including Transposition + try { + auto n = shape{3,4,2}; + auto m = 5u; + tensor A = 2*ones(n); + tensor B = 2*ones(n[1],n[2],m); + tensor B1 = ones(m,n[0]); + tensor B2 = ones(m,n[1]); + + + // C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i); + // tensor C1 = B + prod(A,B1,1); + tensor C1 = B + A(_i,_,_) * B1(_,_i); + + // C2(i,l,k) = A(i,j,k)*B2(l,j) + 4; + // tensor C2 = prod(A,B2) + 4; + tensor C2 = A(_,_j,_) * B2(_,_j) + 4; + + // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); + // not yet implemented. 
+ + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C1(l,j,k) = B(j,k,l) + A(i,j,k)*B1(l,i);" << std::endl << std::endl; + std::cout << "C1=" << C1 << ";" << std::endl << std::endl; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C2(i,l,k) = A(i,j,k)*B2(l,j) + 4;" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + + // formatted output + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "% C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k);" << std::endl << std::endl; + // std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-einstein-notation when doing tensor-matrix multiplication." 
<< std::endl; + } + + + // Tensor-Tensor-Multiplications Including Transposition + try { + auto na = shape{3,4,5}; + auto nb = shape{4,6,3,2}; + tensor A = 2*ones(na); + tensor B = 3*ones(nb); + tensor T1 = 2*ones(na[2],na[2]); + tensor T2 = 2*ones(na[2],nb[1],nb[3]); + + + // C1(j,l) = T1(j,l) + A(i,j,k)*A(i,j,l) + 5; + // tensor C1 = T1 + prod(A,A,perm_t{1,2}) + 5; + tensor C1 = T1 + A(_i,_j,_m)*A(_i,_j,_l) + 5; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C1(k,l) = T1(k,l) + A(i,j,k)*A(i,j,l) + 5;" << std::endl << std::endl; + std::cout << "C1=" << C1 << ";" << std::endl << std::endl; + + + // C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; + //tensor C2 = T2 + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; + tensor C2 = T2 + A(_i,_j,_k)*B(_j,_l,_i,_m) + 5; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% C2(k,l,m) = T2(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5;" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-einstein-notation when doing transpose." 
<< std::endl; + } } diff --git a/examples/tensor/multiply_tensors_product_function.cpp b/examples/tensor/multiply_tensors_product_function.cpp index 31a895370..bd2adb34a 100644 --- a/examples/tensor/multiply_tensors_product_function.cpp +++ b/examples/tensor/multiply_tensors_product_function.cpp @@ -10,44 +10,44 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // - -#include #include +#include #include #include void multiply_tensors_with_dynamic_order() { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; - using format_t = column_major; - using value_t = float; // std::complex; - using tensor_t = dynamic_tensor; - using shape_t = typename tensor_t::extents_type; - using matrix_t = matrix; - using vector_t = vector; + using layout = ublas::layout::first_order; + using value = float; // std::complex; + using tensor = ublas::tensor_dynamic; + using matrix = ublas::matrix; + using vector = ublas::vector; + using shape = typename tensor::extents_type; + constexpr auto ones = ublas::ones{}; // Tensor-Vector-Multiplications - Including Transposition - { + try { - auto n = shape_t{3,4,2}; - auto A = tensor_t(n,2); + auto n = shape{3,4,2}; + auto A = tensor(n,2); auto q = 0u; // contraction mode // C1(j,k) = T2(j,k) + A(i,j,k)*T1(i); q = 1u; - tensor_t C1 = matrix_t(n[1],n[2],2) + prod(A,vector_t(n[q-1],1),q); + tensor C1 = matrix(n[1],n[2],2) + ublas::prod(A,vector(n[q-1],1),q); // C2(i,k) = A(i,j,k)*T1(j) + 4; q = 2u; - tensor_t C2 = prod(A,vector_t(n[q-1],1),q) + 4; + tensor C2 = ublas::prod(A,vector(n[q-1],1),q) + 4; // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); - tensor_t C3 = prod(prod(prod(A,vector_t(n[0],1),1),vector_t(n[1],1),1),vector_t(n[2],1),1); + tensor C3 = ublas::prod(ublas::prod(ublas::prod(A,vector(n[0],1),1),vector(n[1],1),1),vector(n[2],1),1); // C4(i,j) = A(k,i,j)*T1(k) + 4; q = 1u; - tensor_t C4 = prod(trans(A,{2,3,1}),vector_t(n[2],1),q) + 4; + tensor C4 = ublas::prod(trans(A,{2,3,1}),vector(n[2],1),q) + 4; // formatted 
output @@ -74,35 +74,39 @@ void multiply_tensors_with_dynamic_order() std::cout << "% C4(i,j) = A(k,i,j)*T1(k) + 4;" << std::endl << std::endl; std::cout << "C4=" << C4 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." << std::endl; } + // Tensor-Matrix-Multiplications - Including Transposition - { + try { - auto n = shape_t{3,4,2}; - auto A = tensor_t(n,2); + auto n = shape{3,4,2}; + tensor A = 2*ones(n);//tensor auto m = 5u; auto q = 0u; // contraction mode // C1(l,j,k) = T2(l,j,k) + A(i,j,k)*T1(l,i); q = 1u; - tensor_t C1 = tensor_t(shape_t{m,n[1],n[2]},2) + prod(A,matrix_t(m,n[q-1],1),q); + tensor C1 = 2*ones(m,n[1],n[2]) + ublas::prod(A,matrix(m,n[q-1],1),q); // C2(i,l,k) = A(i,j,k)*T1(l,j) + 4; q = 2u; - tensor_t C2 = prod(A,matrix_t(m,n[q-1],1),q) + 4; + tensor C2 = ublas::prod(A,matrix(m,n[q-1],1),q) + 4; // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); q = 3u; - tensor_t C3 = prod(prod(A,matrix_t(m+1,n[q-2],1),q-1),matrix_t(m+2,n[q-1],1),q); + tensor C3 = ublas::prod(ublas::prod(A,matrix(m+1,n[q-2],1),q-1),matrix(m+2,n[q-1],1),q); // C4(i,l1,l2) = A(i,j,k)*T2(l2,k)*T1(l1,j); - tensor_t C4 = prod(prod(A,matrix_t(m+2,n[q-1],1),q),matrix_t(m+1,n[q-2],1),q-1); + tensor C4 = ublas::prod(ublas::prod(A,matrix(m+2,n[q-1],1),q),matrix(m+1,n[q-2],1),q-1); // C5(i,k,l) = A(i,k,j)*T1(l,j) + 4; q = 3u; - tensor_t C5 = prod(trans(A,{1,3,2}),matrix_t(m,n[1],1),q) + 4; + tensor C5 = ublas::prod(trans(A,{1,3,2}),matrix(m,n[1],1),q) + 4; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -135,6 +139,9 @@ void multiply_tensors_with_dynamic_order() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C5(i,k,l) = A(i,k,j)*T1(l,j) + 4;" << std::endl << std::endl; std::cout << "C5=" << C5 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << 
"Cought exception " << e.what(); + std::cerr << "in the multiply_tensors_with_dynamic_order function of multiply-tensor-product-function." << std::endl; } @@ -142,18 +149,18 @@ void multiply_tensors_with_dynamic_order() // Tensor-Tensor-Multiplications Including Transposition - { + try { using perm_t = std::vector; - auto na = shape_t{3,4,5}; - auto nb = shape_t{4,6,3,2}; - auto A = tensor_t(na,2); - auto B = tensor_t(nb,3); + auto na = shape{3,4,5}; + auto nb = shape{4,6,3,2}; + tensor A = 2*ones(na); //tensor(na,2); + tensor B = 3*ones(nb); //tensor(nb,3); // C1(j,l) = T(j,l) + A(i,j,k)*A(i,j,l) + 5; - tensor_t C1 = tensor_t(shape_t{na[2],na[2]},2) + prod(A,A,perm_t{1,2}) + 5.0F; + tensor C1 = 2*ones(na[2],na[2]) + ublas::prod(A,A,perm_t{1,2}) + 5; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -163,7 +170,7 @@ void multiply_tensors_with_dynamic_order() // C2(k,l,m) = T(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; - tensor_t C2 = tensor_t(shape_t{na[2],nb[1],nb[3]},2) + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; + tensor C2 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -173,7 +180,7 @@ void multiply_tensors_with_dynamic_order() // C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5; - tensor_t C3 = tensor_t(shape_t{na[2],nb[1],nb[3]},2) + prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5; + tensor C3 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -181,44 +188,48 @@ void multiply_tensors_with_dynamic_order() std::cout << "% C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5;" << std::endl << std::endl; std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of 
multiply-tensor-product-function." << std::endl; } } void multiply_tensors_with_static_order() { - using namespace boost::numeric::ublas; - - using format_t = column_major; - using value_t = float; // std::complex; - using matrix_t = matrix; - using vector_t = vector; - using tensor2_t = fixed_rank_tensor; - using tensor3_t = fixed_rank_tensor; -// using tensor4_t = fixed_rank_tensor; -// using shape_t = typename tensor_t::extents_type; -// using shape2_t = typename tensor2_t::extents_type; - using shape3_t = typename tensor3_t::extents_type; -// using shape4_t = typename tensor4_t::extents_type; + namespace ublas = boost::numeric::ublas; + + using layout = ublas::layout::first_order; + using value = float; // std::complex; + using matrix = ublas::matrix; + using vector = ublas::vector; + using tensor2 = ublas::tensor_static_rank; + using tensor3 = ublas::tensor_static_rank; + using tensor4 = ublas::tensor_static_rank; + using shape2 = typename tensor2::extents_type; + using shape3 = typename tensor3::extents_type; + using shape4 = typename tensor4::extents_type; + + constexpr auto ones = ublas::ones_static_rank{}; // Tensor-Vector-Multiplications - Including Transposition // dynamic_extents with static rank - { + try { - auto n = shape3_t{3,4,2}; - auto A = tensor3_t(n,value_t(2)); + auto n = shape3{3,4,2}; + tensor3 A = 2*ones(n); auto q = 0U; // contraction mode // C1(j,k) = T2(j,k) + A(i,j,k)*T1(i); q = 1U; - tensor2_t C1 = matrix_t(n[1],n[2],2) + prod(A,vector_t(n[q-1],1),q); + tensor2 C1 = matrix(n[1],n[2],2) + ublas::prod(A,vector(n[q-1],1),q); // C2(i,k) = A(i,j,k)*T1(j) + 4; q = 2U; - tensor2_t C2 = prod(A,vector_t(n[q-1],1),q) + 4; + tensor2 C2 = ublas::prod(A,vector(n[q-1],1),q) + 4; // C3() = A(i,j,k)*T1(i)*T2(j)*T2(k); - tensor2_t C3 = prod(prod(prod(A,vector_t(n[0],1),1),vector_t(n[1],1),1),vector_t(n[2],1),1); + tensor2 C3 = ublas::prod(ublas::prod(ublas::prod(A,vector(n[0],1),1),vector(n[1],1),1),vector(n[2],1),1); // formatted output @@ -239,31 
+250,34 @@ void multiply_tensors_with_static_order() std::cout << "% C3() = A(i,j,k)*T1(i)*T2(j)*T2(k);" << std::endl << std::endl; std::cout << "C3()=" << C3(0) << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." << std::endl; } // Tensor-Matrix-Multiplications - Including Transposition // dynamic_extents with static rank - { + try { - auto n = shape3_t{3,4,2}; - auto A = tensor3_t(n,value_t(2)); + auto n = shape3{3,4,2}; + tensor3 A = 2*ones(n); auto m = 5U; auto q = 0U; // contraction mode // C1(l,j,k) = T2(l,j,k) + A(i,j,k)*T1(l,i); q = 1U; - tensor3_t C1 = tensor3_t( shape3_t{m,n[1],n[2]},value_t(2) ) + prod(A,matrix_t(m,n[q-1],1),q); + tensor3 C1 = 2*ones(m,n[1],n[2]) + ublas::prod(A,matrix(m,n[q-1],1),q); // C2(i,l,k) = A(i,j,k)*T1(l,j) + 4; q = 2U; - tensor3_t C2 = prod(A,matrix_t(m,n[q-1],1),q) + 4 ; + tensor3 C2 = ublas::prod(A,matrix(m,n[q-1],1),q) + 4 ; // C3(i,l1,l2) = A(i,j,k)*T1(l1,j)*T2(l2,k); q = 3U; - tensor3_t C3 = prod(prod(A,matrix_t(m+1,n[q-2],1),q-1),matrix_t(m+2,n[q-1],1),q) ; + tensor3 C3 = ublas::prod(ublas::prod(A,matrix(m+1,n[q-2],1),q-1),matrix(m+2,n[q-1],1),q) ; // C4(i,l1,l2) = A(i,j,k)*T2(l2,k)*T1(l1,j); - tensor3_t C4 = prod(prod(A,matrix_t(m+2,n[q-1],1),q),matrix_t(m+1,n[q-2],1),q-1) ; + tensor3 C4 = ublas::prod(ublas::prod(A,matrix(m+2,n[q-1],1),q),matrix(m+1,n[q-2],1),q-1) ; // formatted output std::cout << "% --------------------------- " << std::endl; @@ -288,57 +302,69 @@ void multiply_tensors_with_static_order() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C4(i,l1,l2) = A(i,j,k)*T2(l2,k)*T1(l1,j);" << std::endl << std::endl; std::cout << "C4=" << C4 << ";" << std::endl << std::endl; - std::cout << "% C3 and C4 should have the same values, true? " << std::boolalpha << (C3 == C4) << "!" 
<< std::endl; + //std::cout << "% C3 and C4 should have the same values, true? " << std::boolalpha << (C3 == C4) << "!" << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." << std::endl; } // Tensor-Tensor-Multiplications Including Transposition // dynamic_extents with static rank - { + try { -// using perm_t = std::array; + using perm_t = std::array; -// auto na = shape3_t{3,4,5}; -// auto nb = shape4_t{4,6,3,2}; -// auto nc = shape2_t{5,5}; -// auto A = tensor3_t(na,2.0F); -// auto B = tensor4_t(nb,3.0F); -// auto C = tensor2_t(nc,2.0F); + auto na = shape3{3,4,5}; + auto nb = shape4{4,6,3,2}; + auto nc = shape2{5,5}; + tensor3 A = 2*ones(na); + tensor4 B = 3*ones(nb); + tensor2 C = 2*ones(nc); // C1(j,l) = T(j,l) + A(i,j,k)*A(i,j,l) + 5; // Right now there exist no tensor other than dynamic_extents with // dynamic rank so every tensor times tensor operator automatically // to dynamic tensor -// auto C1 = C + prod(A,A,perm_t{1,2}) + 5.0F; + auto C1 = C + ublas::prod(A,A,perm_t{1,2}) + 5; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C1(k,l) = T(k,l) + A(i,j,k)*A(i,j,l) + 5;" << std::endl << std::endl; -// std::cout << "C1=" << tensor_t(C1) << ";" << std::endl << std::endl; + std::cout << "C1=" << tensor2(C1) << ";" << std::endl << std::endl; // C2(k,l,m) = T(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5; // Similar Problem as above -// tensor_t C2 = tensor_t(shape_t{na[2],nb[1],nb[3]},2.0F) + prod(A,B,perm_t{1,2},perm_t{3,1}) + 5.0F; + tensor3 C2 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,B,perm_t{1,2},perm_t{3,1}) + 5; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C2(k,l,m) = T(k,l,m) + A(i,j,k)*B(j,l,i,m) + 5;" << std::endl << std::endl; - //std::cout 
<< "C2=" << C2 << ";" << std::endl << std::endl; + std::cout << "C2=" << C2 << ";" << std::endl << std::endl; // C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5; // Similar Problem as above -// tensor_t C3 = tensor_t(shape_t{na[2],nb[1],nb[3]},2.0F) + prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5.0F; + tensor3 C3 = 2*ones(na[2],nb[1],nb[3]) + ublas::prod(A,trans(B,{2,3,1,4}),perm_t{1,2}) + 5; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "% C3(k,l,m) = T(k,l,m) + A(i,j,k)*trans(B(j,l,i,m),{2,3,1,4})+ 5;" << std::endl << std::endl; -// std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + std::cout << "C3=" << C3 << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the multiply_tensors_with_static_order function of multiply-tensor-product-function." << std::endl; + throw; } } int main() { - multiply_tensors_with_dynamic_order(); - multiply_tensors_with_static_order(); + try { + multiply_tensors_with_dynamic_order(); + multiply_tensors_with_static_order(); + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of multiply-tensor-product-function." 
<< std::endl; + } } diff --git a/examples/tensor/simple_expressions.cpp b/examples/tensor/simple_expressions.cpp index 229bfbf5f..81c6e1cf8 100644 --- a/examples/tensor/simple_expressions.cpp +++ b/examples/tensor/simple_expressions.cpp @@ -18,19 +18,23 @@ int main() { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; + using value = float; + using tensor = ublas::tensor_dynamic; + using matrix = ublas::matrix; + using vector = ublas::vector; + using shape = tensor::extents_type; - using tensorf = dynamic_tensor; - using matrixf = matrix; - using vectorf = vector; + try { - auto A = tensorf{3,4,2}; + + auto A = tensor{3,4,2}; auto B = A = 2; // Calling overloaded operators // and using simple tensor expression templates. if( A != (B+1) ){ - A += 2*B - 1; + A += 2*B - 1; } // formatted output @@ -38,14 +42,14 @@ int main() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; - auto n = extents<>{3,4}; - auto D = matrixf(n[0],n[1],1); - auto e = vectorf(n[1],1); - auto f = vectorf(n[0],2); + auto n = shape{3,4}; + auto D = matrix(n[0],n[1],1); + auto e = vector(n[1],1); + auto f = vector(n[0],2); // Calling constructor with // vector expression templates - tensorf C = 2*f; + tensor C = 2*f; // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; @@ -54,12 +58,16 @@ int main() // Calling overloaded operators // and mixing simple tensor and matrix expression templates - tensorf F = 3*C + 4*prod(2*D,e); + tensor F = 3*C + 4*prod(2*D,e); // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "F=" << F << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of 
simple expression." << std::endl; + } } diff --git a/include/boost/numeric/ublas/tensor.hpp b/include/boost/numeric/ublas/tensor.hpp index b1fa1f428..e076aebac 100644 --- a/include/boost/numeric/ublas/tensor.hpp +++ b/include/boost/numeric/ublas/tensor.hpp @@ -1,5 +1,4 @@ -// Copyright (c) 2018-2019 -// Cem Bassoy +// Copyright (c) 2018 Cem Bassoy // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at diff --git a/include/boost/numeric/ublas/tensor/algorithms.hpp b/include/boost/numeric/ublas/tensor/algorithms.hpp index 8ca98ba61..66375e1c0 100644 --- a/include/boost/numeric/ublas/tensor/algorithms.hpp +++ b/include/boost/numeric/ublas/tensor/algorithms.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -10,17 +10,14 @@ // -#ifndef _BOOST_UBLAS_TENSOR_ALGORITHMS_HPP -#define _BOOST_UBLAS_TENSOR_ALGORITHMS_HPP +#ifndef BOOST_UBLAS_TENSOR_ALGORITHMS_HPP +#define BOOST_UBLAS_TENSOR_ALGORITHMS_HPP - -#include #include #include +#include -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas { @@ -38,37 +35,45 @@ namespace ublas { */ template constexpr void copy(const SizeType p, SizeType const*const n, - PointerOut c, SizeType const*const wc, - PointerIn a, SizeType const*const wa) + PointerOut c, SizeType const*const wc, + PointerIn a, SizeType const*const wa) { - static_assert( std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::copy: Argument types for pointers are not pointer types."); - if( p == 0 ) - return; - - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); - - if(wc == nullptr || wa == nullptr) - throw 
std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); - - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); - - - std::function lambda; - - lambda = [&lambda, n, wc, wa](SizeType r, PointerOut c, PointerIn a) - { - if(r > 0) - for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d) - lambda(r-1, c, a ); - else - for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d) - *c = *a; - }; - - lambda( p-1, c, a ); + static_assert( std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::copy: Argument types for pointers are not pointer types."); + if( p == 0 ){ + return; + } + + if(c == nullptr || a == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); + } + + if(wc == nullptr || wa == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); + } + + if(n == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::copy: Pointers shall not be null pointers."); + } + + + std::function lambda; + + lambda = [&lambda, n, wc, wa](SizeType r, PointerOut c, PointerIn a) + { + if(r > 0){ + for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d){ + lambda(r-1, c, a ); + } + } + else{ + for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d){ + *c = *a; + } + } + }; + + lambda( p-1, c, a ); } @@ -87,40 +92,40 @@ constexpr void copy(const SizeType p, SizeType const*const n, * @param[in] op unary operation */ template -constexpr void transform(const SizeType p, - SizeType const*const n, - PointerOut c, SizeType const*const wc, - PointerIn a, SizeType const*const wa, - UnaryOp op) +constexpr void transform(SizeType const p, + SizeType const*const n, + PointerOut c, SizeType const*const wc, + PointerIn a, SizeType const*const wa, + UnaryOp op) { - static_assert( std::is_pointer::value & 
std::is_pointer::value, - "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); - if( p == 0 ) - return; + static_assert( std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); + if( p == 0 ) + return; - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(c == nullptr || a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(wc == nullptr || wa == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(n == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, n, wc, wa, op](SizeType r, PointerOut c, PointerIn a) - { - if(r > 0) - for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d) - lambda(r-1, c, a); - else - for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d) - *c = op(*a); - }; + lambda = [&lambda, n, wc, wa, op](SizeType r, PointerOut c, PointerIn a) + { + if(r > 0) + for(auto d = 0u; d < n[r]; c += wc[r], a += wa[r], ++d) + lambda(r-1, c, a); + else + for(auto d = 0u; d < n[0]; c += wc[0], a += wa[0], ++d) + *c = op(*a); + }; - lambda( p-1, c, a ); + lambda( p-1, c, a ); } @@ -138,39 +143,39 @@ constexpr void transform(const SizeType p, template [[nodiscard]] constexpr ValueType accumulate(SizeType const p, SizeType const*const n, - PointerIn a, SizeType 
const*const w, - ValueType k) + PointerIn a, SizeType const*const w, + ValueType k) { - static_assert(std::is_pointer::value, - "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); + static_assert(std::is_pointer::value, + "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); - if( p == 0 ) - return k; + if( p == 0 ) + return k; - if(a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(w == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(w == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(n == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, n, w](SizeType r, PointerIn a, ValueType k) - { - if(r > 0u) - for(auto d = 0u; d < n[r]; a += w[r], ++d) - k = lambda(r-1, a, k); - else - for(auto d = 0u; d < n[0]; a += w[0], ++d) - k += *a; - return k; - }; + lambda = [&lambda, n, w](SizeType r, PointerIn a, ValueType k) + { + if(r > 0u) + for(auto d = 0u; d < n[r]; a += w[r], ++d) + k = lambda(r-1, a, k); + else + for(auto d = 0u; d < n[0]; a += w[0], ++d) + k += *a; + return k; + }; - return lambda( p-1, a, k ); + return lambda( p-1, a, k ); } /** @brief Performs a reduce operation with all elements of the tensor and an initial value @@ -188,40 +193,40 @@ constexpr ValueType accumulate(SizeType const p, SizeType const*const n, template [[nodiscard]] 
constexpr ValueType accumulate(SizeType const p, SizeType const*const n, - PointerIn a, SizeType const*const w, - ValueType k, BinaryOp op) + PointerIn a, SizeType const*const w, + ValueType k, BinaryOp op) { - static_assert(std::is_pointer::value, - "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); + static_assert(std::is_pointer::value, + "Static error in boost::numeric::ublas::transform: Argument types for pointers are not pointer types."); - if( p == 0 ) - return k; + if( p == 0 ) + return k; - if(a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(w == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(w == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - if(n == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); + if(n == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::transform: Pointers shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, n, w, op](SizeType r, PointerIn a, ValueType k) - { - if(r > 0u) - for(auto d = 0u; d < n[r]; a += w[r], ++d) - k = lambda(r-1, a, k); - else - for(auto d = 0u; d < n[0]; a += w[0], ++d) - k = op ( k, *a ); - return k; - }; + lambda = [&lambda, n, w, op](SizeType r, PointerIn a, ValueType k) + { + if(r > 0u) + for(auto d = 0u; d < n[r]; a += w[r], ++d) + k = lambda(r-1, a, k); + else + for(auto d = 0u; d < n[0]; a += w[0], ++d) + k = op ( k, *a ); + return k; + }; - return lambda( p-1, a, k ); + return lambda( p-1, a, k ); } /** @brief Transposes a tensor @@ -241,45 +246,45 @@ constexpr 
ValueType accumulate(SizeType const p, SizeType const*const n, template constexpr void trans( SizeType const p, SizeType const*const na, SizeType const*const pi, - PointerOut c, SizeType const*const wc, - PointerIn a, SizeType const*const wa) + PointerOut c, SizeType const*const wc, + PointerIn a, SizeType const*const wa) { - static_assert( std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::trans: Argument types for pointers are not pointer types."); + static_assert( std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::trans: Argument types for pointers are not pointer types."); - if( p < 2) - return; + if( p < 2) + return; - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(c == nullptr || a == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - if(na == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null."); + if(na == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null."); - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(wc == nullptr || wa == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - if(na == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(na == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - if(pi == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + if(pi == nullptr) + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers 
shall not be null pointers."); - std::function lambda; + std::function lambda; - lambda = [&lambda, na, wc, wa, pi](SizeType r, PointerOut c, PointerIn a) - { - if(r > 0) - for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) - lambda(r-1, c, a); - else - for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) - *c = *a; - }; + lambda = [&lambda, na, wc, wa, pi](SizeType r, PointerOut c, PointerIn a) + { + if(r > 0) + for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) + lambda(r-1, c, a); + else + for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) + *c = *a; + }; - lambda( p-1, c, a ); + lambda( p-1, c, a ); } @@ -299,49 +304,47 @@ constexpr void trans( SizeType const p, SizeType const*const na, SizeType const */ template -constexpr void trans( SizeType const p, - SizeType const*const na, - SizeType const*const pi, - std::complex* c, SizeType const*const wc, - std::complex* a, SizeType const*const wa) +constexpr void trans(SizeType const p, + SizeType const*const na, + SizeType const*const pi, + std::complex* c, SizeType const*const wc, + std::complex* a, SizeType const*const wa) { - if( p < 2) - return; - - if(c == nullptr || a == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - if(wc == nullptr || wa == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - if(na == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - if(pi == nullptr) - throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); - - - std::function* c, std::complex* a)> lambda; - - lambda = [&lambda, na, wc, wa, pi](SizeType r, std::complex* c, std::complex* a) - { - if(r > 0) - for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) - lambda(r-1, c, a); - else - for(auto d = 0u; d < na[0]; c += 
wc[pi[0]-1], a += wa[0], ++d) - *c = std::conj(*a); - }; - - lambda( p-1, c, a ); + if( p < 2){ + return; + } + if(c == nullptr || a == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + if(wc == nullptr || wa == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + if(na == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + if(pi == nullptr){ + throw std::runtime_error("Error in boost::numeric::ublas::trans: Pointers shall not be null pointers."); + } + + + std::function* c, std::complex* a)> lambda; + + lambda = [&lambda, na, wc, wa, pi](SizeType r, std::complex* c, std::complex* a) + { + if(r > 0) + for(auto d = 0u; d < na[r]; c += wc[pi[r]-1], a += wa[r], ++d) + lambda(r-1, c, a); + else + for(auto d = 0u; d < na[0]; c += wc[pi[0]-1], a += wa[0], ++d) + *c = std::conj(*a); + }; + + lambda( p-1, c, a ); } +} // namespace boost::numeric::ublas -} -} -} - #endif diff --git a/include/boost/numeric/ublas/tensor/concepts.hpp b/include/boost/numeric/ublas/tensor/concepts.hpp new file mode 100644 index 000000000..70820484a --- /dev/null +++ b/include/boost/numeric/ublas/tensor/concepts.hpp @@ -0,0 +1,34 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_UBLAS_TENSOR_CONCEPTS_HPP +#define BOOST_UBLAS_TENSOR_CONCEPTS_HPP + +#include + +namespace boost::numeric::ublas{ + +template +concept integral = std::is_integral_v; + +template +concept signed_integral = integral && std::is_signed_v; + +template +concept unsigned_integral = integral && !signed_integral; + +template +concept floating_point = std::is_floating_point_v; + +} // namespace boost::numeric::ublas + +#endif // BOOST_UBLAS_TENSOR_CONCEPTS_BASIC_HPP diff --git a/include/boost/numeric/ublas/tensor/dynamic_extents.hpp b/include/boost/numeric/ublas/tensor/dynamic_extents.hpp deleted file mode 100644 index b8d2bdeb5..000000000 --- a/include/boost/numeric/ublas/tensor/dynamic_extents.hpp +++ /dev/null @@ -1,241 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_DYNAMIC_EXTENTS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_DYNAMIC_EXTENTS_HPP_ - -#include -#include -#include -#include -#include -#include - -namespace boost { -namespace numeric { -namespace ublas { - -/** @brief Template class for storing tensor extents with runtime variable size. - * - * Proxy template class of std::vector. 
- * - */ -template -class basic_extents -{ - static_assert( std::numeric_limits::value_type>::is_integer, "Static error in basic_layout: type must be of type integer."); - static_assert(!std::numeric_limits::value_type>::is_signed, "Static error in basic_layout: type must be of type unsigned integer."); - -public: - using base_type = std::vector; - using value_type = typename base_type::value_type; - using const_reference = typename base_type::const_reference; - using reference = typename base_type::reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - - /** @brief Default constructs basic_extents - * - * @code auto ex = basic_extents{}; - */ - constexpr basic_extents() = default; - - /** @brief Copy constructs basic_extents from a one-dimensional container - * - * @code auto ex = basic_extents( std::vector(3u,3u) ); - * - * @note checks if size > 1 and all elements > 0 - * - * @param b one-dimensional container of type std::vector - */ - explicit basic_extents(base_type b) - : _base(std::move(b)) - { - if (!is_valid(*this)){ - throw std::length_error("Error in basic_extents::basic_extents() : shape tuple is not a valid permutation: has zero elements."); - } - } - - /** @brief Constructs basic_extents from an initializer list - * - * @code auto ex = basic_extents{3,2,4}; - * - * @note checks if size > 1 and all elements > 0 - * - * @param l one-dimensional list of type std::initializer - */ - basic_extents(std::initializer_list l) - : basic_extents( base_type(std::move(l)) ) - { - } - - /** @brief Constructs basic_extents from a range specified by two iterators - * - * @code auto ex = basic_extents(a.begin(), a.end()); - * - * @note checks if size > 1 and all elements > 0 - * - * @param first iterator pointing to the first element - * @param last iterator 
pointing to the next position after the last element - */ - constexpr basic_extents(const_iterator first, const_iterator last) - : basic_extents ( base_type( first,last ) ) - { - } - - /** @brief Copy constructs basic_extents */ - constexpr basic_extents(basic_extents const& l ) - : _base(l._base) - { - } - - /** @brief Move constructs basic_extents */ - constexpr basic_extents(basic_extents && l ) noexcept - : _base(std::move(l._base)) - { - } - - - template - constexpr basic_extents(OtherExtents const& e) - : _base(e.size()) - { - static_assert( is_extents_v, "boost::numeric::ublas::basic_extents(OtherExtents const&) : " - "OtherExtents should be a valid tensor extents" - ); - std::copy(e.begin(),e.end(), _base.begin()); - } - - ~basic_extents() = default; - - constexpr basic_extents& operator=(basic_extents && other) - noexcept(std::is_nothrow_swappable_v) - { - swap (*this, other); - return *this; - } - constexpr basic_extents& operator=(basic_extents const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_extents temp(other); - swap (*this, temp); - return *this; - } - - friend void swap(basic_extents& lhs, basic_extents& rhs) - noexcept(std::is_nothrow_swappable_v) - { - std::swap(lhs._base , rhs._base ); - } - - [[nodiscard]] inline - constexpr const_pointer data() const noexcept - { - return this->_base.data(); - } - - [[nodiscard]] inline - constexpr const_reference operator[] (size_type p) const - { - return this->_base[p]; - } - - [[nodiscard]] inline - constexpr const_reference at (size_type p) const - { - return this->_base.at(p); - } - - [[nodiscard]] inline - constexpr reference operator[] (size_type p) - { - return this->_base[p]; - } - - [[nodiscard]] inline - constexpr reference at (size_type p) - { - return this->_base.at(p); - } - - [[nodiscard]] inline - constexpr const_reference back () const - { - return this->_base.back(); - } - - - [[nodiscard]] inline - constexpr bool empty() const noexcept - { - return this->_base.empty(); - } - 
- [[nodiscard]] inline - constexpr size_type size() const noexcept - { - return this->_base.size(); - } - - inline - constexpr void clear() noexcept - { - this->_base.clear(); - } - - [[nodiscard]] inline - constexpr const_iterator - begin() const noexcept - { - return _base.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator - end() const noexcept - { - return _base.end(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return _base.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return _base.rend(); - } - - [[nodiscard]] inline - constexpr base_type const& base() const noexcept { return _base; } - -private: - - base_type _base{}; - -}; - -} // namespace ublas -} // namespace numeric -} // namespace boost - - -#endif diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp deleted file mode 100644 index 3f26eb345..000000000 --- a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp +++ /dev/null @@ -1,219 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// -/// \file strides.hpp Definition for the basic_strides template class - - -#ifndef _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ -#define _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ - -#include -#include -#include -#include - -namespace boost { -namespace numeric { -namespace ublas { - -template -class basic_extents; - - -/** @brief Template class for storing tensor strides for iteration with runtime variable size. - * - * Proxy template class of std::vector. 
- * - */ -template -class basic_strides -{ -public: - - using base_type = std::vector<__int_type>; - - static_assert( std::numeric_limits::is_integer, - "Static error in boost::numeric::ublas::basic_strides: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, - "Static error in boost::numeric::ublas::basic_strides: type must be of type unsigned integer."); - static_assert(std::is_same<__layout,layout::first_order>::value || std::is_same<__layout,layout::last_order>::value, - "Static error in boost::numeric::ublas::basic_strides: layout type must either first or last order"); - - - using layout_type = __layout; - using value_type = typename base_type::value_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - - /** @brief Default constructs basic_strides - * - * @code auto ex = basic_strides{}; - */ - constexpr explicit basic_strides() = default; - - /** @brief Constructs basic_strides from basic_extents for the first- and last-order storage formats - * - * @code auto strides = basic_strides( basic_extents{2,3,4} ); - * - */ - template - constexpr basic_strides(ExtentsType const& s) - : _base(s.size(),1) - { - static_assert( is_extents_v, "boost::numeric::ublas::basic_strides(ExtentsType const&) : " - "ExtentsType is not a tensor extents" - ); - if( s.empty() ) - return; - - if( !is_valid(s) ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_strides(ExtentsType const&) : " - "shape is not valid." 
- ); - - if( is_vector(s) || is_scalar(s) ) - return; - - if( this->size() < 2 ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_strides(ExtentsType const&) : " - "size of strides must be greater or equal to 2." - ); - - - if constexpr (std::is_same::value){ - std::transform(s.begin(), s.end() - 1, _base.begin(), _base.begin() + 1, std::multiplies{}); - }else { - std::transform(s.rbegin(), s.rend() - 1, _base.rbegin(), _base.rbegin() + 1, std::multiplies{}); - } - } - - constexpr basic_strides(basic_strides const& l) - : _base(l._base) - {} - - constexpr basic_strides(basic_strides && l ) noexcept - : _base(std::move(l._base)) - {} - - constexpr basic_strides(base_type const& l ) - : _base(l) - {} - - constexpr basic_strides(base_type && l ) noexcept - : _base(std::move(l)) - {} - - ~basic_strides() = default; - - constexpr basic_strides& operator=(basic_strides&& other) - noexcept(std::is_nothrow_swappable_v) - { - swap (*this, other); - return *this; - } - - constexpr basic_strides& operator=(basic_strides const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_strides temp(other); - swap (*this, temp); - return *this; - } - - friend void swap(basic_strides& lhs, basic_strides& rhs) - noexcept(std::is_nothrow_swappable_v) - { - std::swap(lhs._base , rhs._base); - } - - [[nodiscard]] inline - constexpr const_reference operator[] (size_type p) const{ - return _base[p]; - } - - [[nodiscard]] inline - constexpr const_pointer data() const{ - return _base.data(); - } - - [[nodiscard]] inline - constexpr const_reference at (size_type p) const{ - return _base.at(p); - } - - [[nodiscard]] inline - constexpr const_reference back () const{ - return _base.back(); - } - - [[nodiscard]] inline - constexpr reference back (){ - return _base.back(); - } - - [[nodiscard]] inline - constexpr bool empty() const noexcept{ - return _base.empty(); - } - - [[nodiscard]] inline - constexpr size_type size() const noexcept{ - return _base.size(); - } - - 
[[nodiscard]] inline - constexpr const_iterator begin() const noexcept{ - return _base.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator end() const noexcept{ - return _base.end(); - } - - inline - constexpr void clear() noexcept{ - this->_base.clear(); - } - - [[nodiscard]] inline - constexpr base_type const& base() const noexcept{ - return this->_base; - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return _base.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return _base.rend(); - } - -private: - base_type _base{}; -}; - -} -} -} - -#endif diff --git a/include/boost/numeric/ublas/tensor/expression.hpp b/include/boost/numeric/ublas/tensor/expression.hpp index 9b3c44d2c..47d534010 100644 --- a/include/boost/numeric/ublas/tensor/expression.hpp +++ b/include/boost/numeric/ublas/tensor/expression.hpp @@ -14,13 +14,11 @@ #include #include -#include -#include -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +#include "tags.hpp" + +namespace boost::numeric::ublas::detail +{ /** @\brief base class for tensor expressions * @@ -67,16 +65,16 @@ struct binary_tensor_expression using size_type = typename tensor_type::size_type; - explicit constexpr binary_tensor_expression(expression_type_left const& l, expression_type_right const& r, binary_operation o) - : el(l) , er(r) , op(o) {} + explicit constexpr binary_tensor_expression(expression_type_left const& l, expression_type_right const& r, binary_operation o) : el(l) , er(r) , op(std::move(o)) {} + constexpr binary_tensor_expression(binary_tensor_expression&& l) noexcept = delete; + constexpr binary_tensor_expression& operator=(binary_tensor_expression&& l) noexcept = delete; + ~binary_tensor_expression() = default; + binary_tensor_expression() = delete; binary_tensor_expression(const binary_tensor_expression& l) = delete; - constexpr 
binary_tensor_expression(binary_tensor_expression&& l) noexcept - : el(l.el), er(l.er), op(std::move(l.op)) {} - constexpr binary_tensor_expression& operator=(binary_tensor_expression&& l) noexcept = default; binary_tensor_expression& operator=(binary_tensor_expression const& l) noexcept = delete; - ~binary_tensor_expression() = default; + [[nodiscard]] inline constexpr decltype(auto) operator()(size_type i) const { return op(el(i), er(i)); } @@ -135,24 +133,22 @@ struct unary_tensor_expression using self_type = unary_tensor_expression; using tensor_type = T; using expression_type = E; - + using unary_operation = OP; using derived_type = tensor_expression >; using size_type = typename tensor_type::size_type; - explicit constexpr unary_tensor_expression(E const& ee, OP o) : e(ee) , op(o) {} - constexpr unary_tensor_expression() = delete; - unary_tensor_expression(const unary_tensor_expression& l) = delete; - constexpr unary_tensor_expression(unary_tensor_expression&& l) noexcept - : e(l.e), op(std::move(l.op)) {} + explicit constexpr unary_tensor_expression(expression_type const& ee, unary_operation o) : e(ee) , op(std::move(o)) {} + constexpr unary_tensor_expression(unary_tensor_expression&& l) noexcept = delete; + constexpr unary_tensor_expression& operator=(unary_tensor_expression&& l) noexcept = delete; - constexpr unary_tensor_expression& operator=(unary_tensor_expression&& l) noexcept = default; - + constexpr unary_tensor_expression() = delete; + unary_tensor_expression(unary_tensor_expression const& l) = delete; unary_tensor_expression& operator=(unary_tensor_expression const& l) noexcept = delete; ~unary_tensor_expression() = default; - [[nodiscard]] inline - constexpr decltype(auto) operator()(size_type i) const { return op(e(i)); } + [[nodiscard]] inline constexpr + decltype(auto) operator()(size_type i) const { return op(e(i)); } E const& e; OP op; @@ -181,8 +177,6 @@ constexpr auto make_unary_tensor_expression( vector_expression const& e, OP o } -} -} -} 
-} -#endif +} // namespace boost::numeric::ublas::detail + +#endif // BOOST_UBLAS_TENSOR_EXPRESSIONS_HPP diff --git a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp index a169b4e62..d29b6eabe 100644 --- a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp +++ b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,20 +13,22 @@ #ifndef BOOST_UBLAS_TENSOR_EXPRESSIONS_EVALUATION_HPP #define BOOST_UBLAS_TENSOR_EXPRESSIONS_EVALUATION_HPP -#include #include -#include +#include + + + +#include "extents.hpp" +#include "extents/extents_functions.hpp" +#include "type_traits.hpp" namespace boost::numeric::ublas { -template +template class tensor_core; -template -class basic_extents; - -} +} // namespace boost::numeric::ublas namespace boost::numeric::ublas::detail { @@ -39,7 +41,7 @@ struct binary_tensor_expression; template struct unary_tensor_expression; -} +} // namespace boost::numeric::ublas::detail namespace boost::numeric::ublas::detail { @@ -66,11 +68,8 @@ struct has_tensor_types> } // namespace boost::numeric::ublas::detail -namespace boost::numeric::ublas::detail { - - - - +namespace boost::numeric::ublas::detail +{ /** @brief Retrieves extents of the tensor_core * @@ -166,91 +165,83 @@ constexpr auto& retrieve_extents(unary_tensor_expression const& expr) namespace boost::numeric::ublas::detail { -template +template [[nodiscard]] inline -constexpr auto all_extents_equal(tensor_core const& t, Extents const& extents) + constexpr auto all_extents_equal(tensor_core const& t, extents const& e) { 
- static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." - ); - - return extents == t.extents(); + return ::operator==(e,t.extents()); } -template +template [[nodiscard]] -constexpr auto all_extents_equal(tensor_expression const& expr, Extents const& extents) +constexpr auto all_extents_equal(tensor_expression const& expr, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." - ); static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors."); auto const& cast_expr = static_cast(expr); + using ::operator==; + using ::operator!=; if constexpr ( std::is_same::value ) - if( extents != cast_expr.extents() ) + if( e != cast_expr.extents() ) return false; if constexpr ( detail::has_tensor_types::value ) - if ( !all_extents_equal(cast_expr, extents)) + if ( !all_extents_equal(cast_expr, e)) return false; return true; } -template +template [[nodiscard]] -constexpr auto all_extents_equal(binary_tensor_expression const& expr, Extents const& extents) +constexpr auto all_extents_equal(binary_tensor_expression const& expr, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." 
- ); - static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors."); + using ::operator==; + using ::operator!=; + if constexpr ( std::is_same::value ) - if(extents != expr.el.extents()) + if(e != expr.el.extents()) return false; if constexpr ( std::is_same::value ) - if(extents != expr.er.extents()) + if(e != expr.er.extents()) return false; if constexpr ( detail::has_tensor_types::value ) - if(!all_extents_equal(expr.el, extents)) + if(!all_extents_equal(expr.el, e)) return false; if constexpr ( detail::has_tensor_types::value ) - if(!all_extents_equal(expr.er, extents)) + if(!all_extents_equal(expr.er, e)) return false; return true; } -template +template [[nodiscard]] -constexpr auto all_extents_equal(unary_tensor_expression const& expr, Extents const& extents) +constexpr auto all_extents_equal(unary_tensor_expression const& expr, extents const& e) { - static_assert(is_extents_v, - "Error in boost::numeric::ublas::detail::all_extents_equal: extents passed should be of extents type." 
- ); - static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::all_extents_equal: Expression to evaluate should contain tensors."); + using ::operator==; + if constexpr ( std::is_same::value ) - if(extents != expr.e.extents()) + if(e != expr.e.extents()) return false; if constexpr ( detail::has_tensor_types::value ) - if(!all_extents_equal(expr.e, extents)) + if(!all_extents_equal(expr.e, e)) return false; return true; @@ -259,7 +250,8 @@ constexpr auto all_extents_equal(unary_tensor_expression const& expr, Ex } // namespace boost::numeric::ublas::detail -namespace boost::numeric::ublas::detail { +namespace boost::numeric::ublas::detail +{ /** @brief Evaluates expression for a tensor_core @@ -290,10 +282,10 @@ template const& expr) { - static_assert(is_valid_tensor_v && is_valid_tensor_v, - "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " - "tensor_type and tensor_expresssion should be a valid tensor type" - ); +// static_assert(is_valid_tensor_v && is_valid_tensor_v, +// "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " +// "tensor_type and tensor_expresssion should be a valid tensor type" +// ); static_assert(std::is_same_v, "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " @@ -347,5 +339,5 @@ inline void eval(tensor_type& lhs, unary_fn const& fn) } -} +} // namespace boost::numeric::ublas::detail #endif diff --git a/include/boost/numeric/ublas/tensor/extents.hpp b/include/boost/numeric/ublas/tensor/extents.hpp new file mode 100644 index 000000000..74034264e --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents.hpp @@ -0,0 +1,53 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef BOOST_UBLAS_TENSOR_EXTENTS_HPP +#define BOOST_UBLAS_TENSOR_EXTENTS_HPP + +#include "extents/extents_base.hpp" +#include "extents/extents_dynamic_size.hpp" +#include "extents/extents_static_size.hpp" +#include "extents/extents_static.hpp" +#include "extents/extents_functions.hpp" +#include "extents/extents_static_functions.hpp" + + +template +bool operator==( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +bool operator==( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +bool operator!=( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return !( lhs == rhs) ; +} + +template +bool operator!=( + boost::numeric::ublas::extents const& lhs, + boost::numeric::ublas::extents const& rhs ) +{ + return !( lhs == rhs) ; +} + + +#endif // BOOST_UBLAS_TENSOR_EXTENTS_HPP diff --git a/include/boost/numeric/ublas/tensor/extents/extents_base.hpp b/include/boost/numeric/ublas/tensor/extents/extents_base.hpp new file mode 100644 index 000000000..40fc01846 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_base.hpp @@ -0,0 +1,54 @@ +// +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_BASE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_BASE_HPP + +#include +//#include +#include + +#include "../concepts.hpp" + +namespace boost::numeric::ublas { + + +template +struct extents_base +{ + + using derived_type = D; + inline constexpr decltype(auto) operator()() const { return static_cast(*this); } + inline constexpr decltype(auto) operator()() { return static_cast< derived_type&>(*this); } + +}; + +template +class extents_core; + +template +using extents = extents_core; + +template struct is_extents : std::false_type {}; +template struct is_strides : std::false_type {}; +template struct is_dynamic : std::false_type {}; +template struct is_static : std::false_type {}; +template struct is_dynamic_rank : std::false_type {}; +template struct is_static_rank : std::false_type {}; + +template inline static constexpr bool const is_extents_v = is_extents::value; +template inline static constexpr bool const is_strides_v = is_strides::value; +template inline static constexpr bool const is_dynamic_v = is_dynamic::value; +template inline static constexpr bool const is_static_v = is_static ::value; +template inline static constexpr bool const is_dynamic_rank_v = is_dynamic_rank::value; +template inline static constexpr bool const is_static_rank_v = is_static_rank::value; + +} // namespace boost::numeric::ublas + +#endif // _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_BASE_HPP_ diff --git a/include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp b/include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp new file mode 100644 index 000000000..fe3266050 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_dynamic_size.hpp @@ -0,0 +1,154 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the 
Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_DYNAMIC_SIZE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_DYNAMIC_SIZE_HPP + +#include +#include +#include +#include +#include + +#include "extents_base.hpp" +#include "extents_functions.hpp" + +#include "../layout.hpp" +#include "../concepts.hpp" + +namespace boost::numeric::ublas { + + + +/** @brief Template class for storing tensor extents with runtime variable size. + * + * Proxy template class of std::vector. + * + */ +template +class extents_core + : public extents_base> +{ + using super_type = extents_base>; + +public: + using base_type = std::vector; + using value_type = typename base_type::value_type; + using const_reference = typename base_type::const_reference; + using reference = typename base_type::reference; + using size_type = typename base_type::size_type; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + + extents_core() = default; + + explicit extents_core(base_type b) + : _base(std::move(b)) + { + if (!ublas::is_valid(*this)){ + throw std::invalid_argument("in boost::numeric::ublas::extents<> : " + "could not intanstiate extents<> as provided extents are not valid."); + } + } + + /** @brief Constructs extents from an initializer list + * + * @code auto ex = extents<>{}; @endcode + * @code auto ex = extents<>{3,2,4}; @endcode + * + * @note checks if size > 1 and all elements > 0 + * + * @param l one-dimensional list of type std::initializer + */ + extents_core(std::initializer_list l) + : extents_core( base_type(l) ) + { + if (!ublas::is_valid(*this)){ + throw std::invalid_argument("in 
boost::numeric::ublas::extents<> : " + "could not intanstiate extents<> as provided extents are not valid."); + } + } + + /** @brief Constructs extents from a range specified by two iterators + * + * @code auto ex = extents<>(a.begin(), a.end()); + * + * @note checks if size > 1 and all elements > 0 + * + * @param first iterator pointing to the first element + * @param last iterator pointing to the next position after the last element + */ + + template + constexpr extents_core(InputIt first, InputIt last) + : extents_core ( base_type( first,last ) ) + { + if (!ublas::is_valid(*this)){ + throw std::invalid_argument("in boost::numeric::ublas::extents<> : " + "could not intanstiate extents<> as provided extents are not valid."); + } + } + + /** @brief Copy constructs extents */ + /*constexpr*/ extents_core(extents_core const& l ) + : _base(l._base) + { + } + + /** @brief Move constructs extents */ + /*constexpr*/ extents_core(extents_core && l ) noexcept + : _base(std::move(l._base)) + { + } + + ~extents_core() = default; + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + extents_core& operator=(extents_core other) + noexcept(std::is_nothrow_swappable_v) + { + swap (*this, other); + return *this; + } + + + friend void swap(extents_core& lhs, extents_core& rhs) + noexcept(std::is_nothrow_swappable_v) + { + std::swap(lhs._base,rhs._base); + } + + [[nodiscard]] inline /*constexpr*/ const_reference operator[] (size_type p) const { return this->_base[p]; } + [[nodiscard]] inline /*constexpr*/ const_reference at (size_type p) const { return this->_base.at(p); } + + [[nodiscard]] inline /*constexpr*/ auto size() const noexcept { return this->_base.size(); } + [[nodiscard]] inline /*constexpr*/ auto const& base() const noexcept { return this->_base; } + [[nodiscard]] inline /*constexpr*/ const_pointer data() const noexcept { return this->_base.data(); } +private: + base_type _base; +}; + +} // namespace boost::numeric::ublas + + 
+namespace boost::numeric::ublas{ +template struct is_extents < extents_core > : std::true_type {}; +template struct is_dynamic < extents_core > : std::true_type {}; +template struct is_dynamic_rank < extents_core > : std::true_type {}; + +} // namespace boost::numeric::ublas + + +#endif diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp new file mode 100644 index 000000000..85e64ff8f --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -0,0 +1,247 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP + + +#include +#include +//#include + +#include "../layout.hpp" +#include "../concepts.hpp" + +namespace boost::numeric::ublas +{ +template +class extents_core; + + +template +struct extents_base; + +template [[nodiscard]] constexpr inline auto front (extents_base const& e) noexcept -> typename D::const_reference { return e().base().front(); } +template [[nodiscard]] constexpr inline auto back (extents_base const& e) noexcept -> typename D::const_reference { return e().base().back(); } +template [[nodiscard]] constexpr inline auto begin (extents_base const& e) noexcept -> typename D::const_iterator { return e().base().begin(); } +template [[nodiscard]] constexpr inline auto end (extents_base const& e) noexcept -> typename D::const_iterator { return e().base().end(); } +template [[nodiscard]] constexpr inline auto cbegin(extents_base const& e) noexcept -> typename D::const_iterator { return e().base().cbegin(); } +template [[nodiscard]] constexpr 
inline auto cend (extents_base const& e) noexcept -> typename D::const_iterator { return e().base().cend(); } +template [[nodiscard]] constexpr inline auto rbegin(extents_base const& e) noexcept -> typename D::const_reverse_iterator { return e().base().rbegin(); } +template [[nodiscard]] constexpr inline auto rend (extents_base const& e) noexcept -> typename D::const_reverse_iterator { return e().base().rend(); } +template [[nodiscard]] constexpr inline auto empty (extents_base const& e) noexcept -> bool { return e().base().empty(); } +template [[nodiscard]] constexpr inline auto size (extents_base const& e) noexcept -> typename D::size_type { return e().base().size(); } + +} //namespace boost::numeric::ublas + + +namespace boost::numeric::ublas +{ + +/** @brief Returns true if extents equals ([m,n,...,l]) with m>0,n>0,...,l>0 */ +template +[[nodiscard]] inline constexpr bool is_valid(extents_base const& e) +{ + return std::all_of(begin(e),end(e), [](auto a){ return a>0UL; } ); +} + +/** @brief Returns true if extents equals (m,[n,...,l]) with m=1,n=1,...,l=1 */ +template +[[nodiscard]] inline constexpr bool is_scalar(extents_base const& e) +{ + return (size(e)>0) && std::all_of (cbegin(e),cend(e),[](auto a){return a==1ul;}); +} + +/** @brief Returns true if extents equals (m,[n,1,...,1]) with m>=1||n>=1 && m==1||n==1*/ +template +[[nodiscard]] inline constexpr bool is_vector(extents_base const& e) +{ + if (empty(e) ) {return false;} + if (size (e) == 1) {return front(e)>=1ul;} + + return + std::any_of(cbegin(e) ,cbegin(e)+2ul, [](auto a){return a>=1ul;}) && + std::any_of(cbegin(e) ,cbegin(e)+2ul, [](auto a){return a==1ul;}) && + std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;}); + +// std::any_of(cbegin(e) ,cbegin(e)+2, [](auto a){return a>=1UL;}) && // a>1UL +// std::any_of(cbegin(e) ,cbegin(e)+2, [](auto a){return a==1UL;}) && +// std::all_of(cbegin(e)+2,cend(e) , [](auto a){return a==1UL;}); +} + +/** @brief Returns true if (m,[n,1,...,1]) with 
m>=1 or n>=1 */ +template +[[nodiscard]] inline constexpr bool is_matrix(extents_base const& e) +{ + if (empty(e) ) {return false;} + if (size (e) == 1) {return front(e)>=1ul;} + + return std::any_of (cbegin(e) ,cbegin(e)+2, [](auto a){return a>=1ul;}) && // all_of > 1UL + std::all_of (cbegin(e)+2,cend(e) , [](auto a){return a==1ul;}); +} + +/** @brief Returns true if shape is has a tensor shape + * + * @returns true if is_valid & not (is_scalar&is_vector&is_matrix) + */ +template +[[nodiscard]] inline constexpr bool is_tensor(extents_base const& e) +{ + return size(e) > 2 && + std::all_of (cbegin(e) ,cbegin(e)+2, [](auto a){return a>=1ul;}) && // all_of > 1UL + std::any_of (cbegin(e)+2,cend(e) , [](auto a){return a> 1ul;}); +} + + + + + +/** @brief Computes the number of elements */ +template +[[nodiscard]] inline constexpr auto product( extents_base const& e ) +{ + if( empty(e) ){ + return std::size_t{0UL}; + } + + return std::accumulate(begin(e), end(e), std::size_t{1UL}, std::multiplies<>{}); +} + + +//template // std::inserter(out,out.begin()) +//inline constexpr +// void squeeze( +// extents_base const& in, +// extents_base const& out) +//{ +// if(e().size() < 2){ return; } + +// if(is_vector(e) || is_scalar(e)) { +// std::copy (in, in+2, out ); +// } +// else{ +// std::copy_if(in, in_end, out, [](auto a){return a!=1u;}); +// } +//} + +//template +//[[nodiscard]] inline bool operator==( +// extents_base const& lhs, +// extents_base const& rhs ) +//{ +// return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +//} + +//template +//[[nodiscard]] inline bool operator!=( +// extents_base const& lhs, +// extents_base const& rhs ) +//{ +// return !( lhs == rhs) ; +//} + +template +[[nodiscard]] inline auto to_strides(extents_core const& e, L /*unused*/) +{ + auto s = typename extents_core::base_type(e.size(),1ul); + + if(empty(e) || is_vector(e) || is_scalar(e)){ + return s; + } + if constexpr(std::is_same_v){ + std::transform(begin (e), 
end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); + } else { + std::transform(rbegin(e), rend(e) - 1, s.rbegin(), s.rbegin()+1, std::multiplies<>{}); + } + return s; +} + +template +[[nodiscard]] inline auto to_strides(extents_core const& e, L /*unused*/) +{ + auto s = typename extents_core::base_type{}; + std::fill(s.begin(),s.end(),1ul); + + if(empty(e) || is_vector(e) || is_scalar(e)){ + return s; + } + if constexpr(std::is_same_v){ + std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); + } else { + std::transform(rbegin(e), rend(e) - 1, s.rbegin(), s.rbegin()+1, std::multiplies<>{}); + } + return s; +} + + + +} // namespace boost::numeric::ublas + + +template +[[nodiscard]] inline constexpr bool operator==( + boost::numeric::ublas::extents_core const& lhs, + boost::numeric::ublas::extents_core const& rhs ) +{ + if constexpr(m != n) + return false; + return std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +[[nodiscard]] inline constexpr bool operator!=( + boost::numeric::ublas::extents_core const& lhs, + boost::numeric::ublas::extents_core const& rhs ) +{ + if constexpr(m == n) + return false; + return !(lhs == rhs) ; +} + +template +[[nodiscard]] inline constexpr bool operator==( + boost::numeric::ublas::extents_base const& lhs, + boost::numeric::ublas::extents_base const& rhs ) +{ + return size(lhs) == size(rhs) && std::equal( begin(lhs), end (lhs), begin(rhs) ); +} + +template +[[nodiscard]] inline constexpr bool operator!=( + boost::numeric::ublas::extents_base const& lhs, + boost::numeric::ublas::extents_base const& rhs ) +{ + return !( lhs == rhs) ; +} + + +namespace std +{ + +template +struct tuple_size< boost::numeric::ublas::extents_core > + : integral_constant::base_type>> +{}; + +template +[[nodiscard]] constexpr inline + auto get(boost::numeric::ublas::extents_core const& e) noexcept +{ + return std::get(e.base()); +} + +} // namespace std + + +#endif // 
_BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ + + diff --git a/include/boost/numeric/ublas/tensor/extents/extents_static.hpp b/include/boost/numeric/ublas/tensor/extents/extents_static.hpp new file mode 100644 index 000000000..3f375b50d --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_static.hpp @@ -0,0 +1,78 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_HPP + +#include +#include +#include +#include + +#include "extents_functions.hpp" +#include "extents_base.hpp" +#include "../concepts.hpp" + + +namespace boost::numeric::ublas { + + +/** @brief Template class for storing tensor extents for compile time. 
+ * + * @code extents<1,2,3,4> t @endcode + * + * @tparam e parameter pack of extents + * + */ +template +class extents_core + : public extents_base> +{ + static constexpr auto size = sizeof...(e)+2u; +public: + + using base_type = std::array; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + constexpr extents_core() = default; + constexpr extents_core(extents_core const&) noexcept = default; + constexpr extents_core(extents_core &&) noexcept = default; + constexpr extents_core& operator=(extents_core const&) noexcept = default; + constexpr extents_core& operator=(extents_core &&) noexcept = default; + ~extents_core() = default; + + [[nodiscard]] inline constexpr const_reference at (size_type k) const{ return m_data.at(k); } + [[nodiscard]] inline constexpr const_reference operator[](size_type k) const{ return m_data[k]; } + [[nodiscard]] inline constexpr base_type const& base() const noexcept{ return m_data; } + [[nodiscard]] inline constexpr const_pointer data () const noexcept{ return m_data.data(); } + +private: + static constexpr base_type const m_data{e1,e2,e...}; + +}; + +template struct is_extents < extents_core > : std::true_type {}; +template struct is_static < extents_core > : std::true_type {}; +template struct is_static_rank < extents_core > : std::true_type {}; + +} // namespace boost::numeric::ublas + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp new file mode 100644 index 000000000..08b930d6f --- /dev/null +++ 
b/include/boost/numeric/ublas/tensor/extents/extents_static_functions.hpp @@ -0,0 +1,637 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_FUNCTIONS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_FUNCTIONS_HPP + + +#include +#include +#include + +#include "extents_base.hpp" +#include "../layout.hpp" + + + +namespace boost::numeric::ublas +{ + + + +//////////////// SIZE /////////////// + +namespace detail { +template +struct size_impl_t; +template +struct size_impl_t> +{ static constexpr auto value = sizeof ...(es); }; +template +struct size_impl_t> +{ static constexpr auto value = sizeof ...(es); }; +} // namespace detail + +/** @brief Returns the size of a pure static extents type + * + * @code constexpr auto n = size_v>; + * @note corresponds to std::tuple_size_v + * +*/ +template +constexpr inline auto size_v = detail::size_impl_t>::value; + +//////////////// EMPTY /////////////// + +namespace detail { +template +struct empty_impl_t; + +template +struct empty_impl_t> +{ static constexpr bool value = size_v> == 0ul; }; +} // namespace detail + +/** @brief Returns if a pure static extents type is empty + * + * @code constexpr bool empty = empty_v>; // -> false + * +*/ +template +constexpr inline bool empty_v = detail::empty_impl_t>::value; + + +//////////////// GET ///////////////////// + +namespace detail { +template +struct get_impl_t; + +template +struct get_impl_t> +{ + static constexpr auto value = 0; +}; + +template +struct get_impl_t> +{ + static_assert ( j < n && k < n ); + static constexpr auto value = e1; +}; + +template +struct get_impl_t> +{ + static_assert ( k < n && j < n ); + static constexpr auto value 
= (j==k) ? e1 : get_impl_t>::value; +}; +} // namespace detail + +/** @brief Returns the j-th element of a pure static extents type with 0 <= j < size_v + * + * @code constexpr auto e_j = get_v,2>; + * +*/ +template +constexpr inline auto get_v = detail::get_impl_t,std::decay_t>::value; + + +//////////////// CAT ///////////////////// +namespace detail { +template +struct cat_impl_t +{ + template + struct inner; + template + struct inner < std::index_sequence, std::index_sequence > + { + using type = extents < get_v..., get_v... >; + }; + using type = typename inner < + std::make_index_sequence>, + std::make_index_sequence> >::type; +}; +} // namespace detail + +/** @brief Concatenates two static extents type + * + * @code using extents_t = cat,extents<7,6>>; // -> extents<4,3,2,7,6> + * + * @tparam EL left extents<...> + * @tparam ER right extents<...> +*/ +template +using cat_t = typename detail::cat_impl_t,std::decay_t>::type; + +//////////////// FOR_EACH //////////////// + +namespace detail { + +template class> +struct for_each_impl_t; + +template class UnaryOp, typename std::size_t ... es> +struct for_each_impl_t, UnaryOp > +{ using type = extents< ( UnaryOp::value )... >; }; + +template class UnaryOp, typename std::size_t ... is> +struct for_each_impl_t, UnaryOp > +{ using type = std::index_sequence< ( UnaryOp::value )... >; }; + +} // namespace detail + +/** @brief Applies a unary operation for each element of a given static extents type + * + * @code template struct add5 { static constexpr auto value = e+5; }; + * @code using extents_t = for_each,add5>; // -> extents<9,8,7> + * + * @tparam E extents<...> +*/ +template typename UnaryOp> +using for_each_t = typename detail::for_each_impl_t, UnaryOp>::type; + +//////////////// TEST //////////////// + +namespace detail { + +template class> +struct for_each_test_impl_t; + +template class UnaryPred, typename std::size_t ... 
es> +struct for_each_test_impl_t, UnaryPred > +{ using type = std::integer_sequence::value )... >; }; + +template class UnaryPred, typename std::size_t ... is> +struct for_each_test_impl_t, UnaryPred > +{ using type = std::integer_sequence::value )... >; }; + +} // namespace detail + +/** @brief Returns true if for each element of a given static extents type the unary predicate holds + * + * @code template struct equal5 { static constexpr bool value = e==5; }; + * @code using sequence_t = for_each,equal5>; // -> std::integer_sequence + * + * @tparam E extents<...> +*/ +template typename UnaryPred> +using for_each_test_t = typename detail::for_each_test_impl_t, UnaryPred>::type; + + +//////////////// SELECT INDEX SEQUENCE ///////////////// + +namespace detail { +template +struct select_impl_t +{ + static_assert( size_v >= I::size() ); + template struct inner; + template + struct inner > { using type = extents ... >; }; + using type = typename inner::type; +}; +} // namespace detail + +/** @brief Returns a static extents type selected from a static extents type using std::index_sequence + * + * @code using extents_t = select,std::index_sequence<0,2>>; // -> extents<4,2> + * + * @tparam E extents<...> + * @tparam S std::index_sequence<...> +*/ +template +using select_t = typename detail::select_impl_t, S>::type; + + +//////////////// BINARY PLUS OP ///////////////// + +template +struct plus_t { static constexpr auto value = i+j; }; + +template +constexpr inline auto plus_v = plus_t::value; + +template +struct multiply_t { static constexpr auto value = i*j;}; + + + +//////////////// SET ///////////////////// + +namespace detail { +template +struct set_impl_t; + +template +struct set_impl_t> +{ + static constexpr inline auto n = size_v>; + template using plus_j1 = plus_t; + + using head_indices = std::make_index_sequence; + using tail_indices = for_each_t,plus_j1>; + + using head = select_t,head_indices>; + using tail = select_t,tail_indices>; + using type = 
cat_t>,tail>; +}; +} // namespace detail + +/** @brief Sets the j-th element of a pure static extents type with 0 <= j < size_v + * + * @code using extents_t = set_t<2,5,extents<4,3,2>>; // extents<4,3,5> + * + * @tparam j j-th position in extents with 0 <= j < size_v + * @tparam e value to replace the j-th element + * @tparam E extents +*/ +template +using set_t = typename detail::set_impl_t>::type; + + + +//////////////// REVERSE ////////////////// + +namespace detail { +template +struct reverse_impl_t; + +template +struct reverse_impl_t> +{ + using type = extents < ( get_v-js-1>) ... >; +}; +} // namespace detail + +/** @brief Reverses static extents of a static extents type + * + * @code using extents_t = reverse_t>; // -> extents<2,3,4> + * + * @tparam E extents<...> +*/ +template +using reverse_t = typename detail::reverse_impl_t, std::make_index_sequence>>::type; + + +//////////////// REMOVE ////////////////// + +namespace detail{ +template +struct remove_element_impl_t +{ + static constexpr auto n = E::size(); + using head = select_t >; + + template + struct tail_indices; + template + struct tail_indices> + { + using type = extents< (is+k+1) ... 
>; + }; + using tail = select_t>::type>; + using type = cat_t< head, tail>; +}; +} // namespace detail + +/** @brief Removes a static extent of a static extents type + * + * @code using extents_t = remove<1,extents<4,3,2>>; // -> extents<4,2> + * @note it is a special case of the select function + * + * @tparam k zero-based index + * @tparam E extents<...> +*/ +template +using remove_element_t = typename detail::remove_element_impl_t>::type; + + + +//////////////// ACCUMULATE ///////////////////// + +namespace detail { + +template class> +struct accumulate_impl_t; + +template class op, std::size_t i> +struct accumulate_impl_t, i, op> +{ static constexpr auto value = i; }; + +template class op, std::size_t i, std::size_t e> +struct accumulate_impl_t, i, op> +{ static constexpr auto value = op::value; }; + +template class op, std::size_t i, std::size_t e, std::size_t ... es> +struct accumulate_impl_t, i, op> +{ + using next = accumulate_impl_t,i,op>; + static constexpr auto value = op::value; + +}; +} // namespace detail + +template class BinaryOp> +constexpr inline auto accumulate_v = detail::accumulate_impl_t>,I,BinaryOp>::value; + + +//////////////// Product ///////////////////// + +namespace detail { + +template +struct product_impl_t +{ + static constexpr auto value = empty_v ? 0UL : accumulate_v; +}; + +} // namespace detail +template +constexpr inline auto product_v = detail::product_impl_t>::value; + + +//////////////// ALL_OF ///////////////////// + + +namespace detail { + +template +struct all_of_impl_t; +template<> +struct all_of_impl_t> +{ static constexpr bool value = true; }; +template +struct all_of_impl_t> +{ static constexpr bool value = ( e && ... 
&& es ); }; + +} // namespace detail + +/** @brief Returns true if all elements of Extents satisfy UnaryOp + * + * @code constexpr auto e_j = all_of_v>; +*/ +template class UnaryPred> +constexpr inline bool all_of_v = detail::all_of_impl_t,UnaryPred>>::value; + + +//////////////// ALL_OF ///////////////////// + +namespace detail { +template +struct any_of_impl_t; +template<> +struct any_of_impl_t> +{ static constexpr bool value = true;}; + +template +struct any_of_impl_t> +{ static constexpr bool value = ( e || ... || es ); }; + +} // namespace detail + +template class UnaryOp> +constexpr inline bool any_of_v = detail::any_of_impl_t,UnaryOp>>::value; + + +//////////////// IS_VALID ///////////////////// + +namespace detail { + +template +struct is_valid_impl_t { static constexpr bool value = false; }; +template<> +struct is_valid_impl_t> { static constexpr bool value = true ; }; + +template +struct is_valid_impl_t> +{ + template + struct greater_than_zero { static constexpr auto value = (n>0ul); }; + + static constexpr bool value = all_of_v,greater_than_zero >; +}; +} // namespace detail + +/** @brief Returns true if extents equals ([m,n,...,l]) with m>0,n>0,...,l>0 */ +template +constexpr inline bool is_valid_v = detail::is_valid_impl_t>::value; + + + +//////////////// IS_SCALAR ///////////////////// + +namespace detail { +template +struct is_scalar_impl_t +{ + template + struct equal_to_one { static constexpr auto value = (n == 1ul); }; + + static constexpr bool value = is_valid_v && + !empty_v && + all_of_v; +}; +} // namespace detail + +/** @brief Returns true if extents equals (m,[n,...,l]) with m=1,n=1,...,l=1 */ +template +constexpr inline bool is_scalar_v = detail::is_scalar_impl_t>::value; + + +//////////////// IS_VECTOR ///////////////////// + +namespace detail { + +template +struct is_vector_impl_t { static constexpr bool value = false; }; +template<> +struct is_vector_impl_t> { static constexpr bool value = false; }; + +template +struct is_vector_impl_t> 
{ static constexpr bool value = (e>=1); }; + +template +struct is_vector_impl_t> +{ + template struct equal_to_one { static constexpr auto value = (n == 1ul); }; + template struct greater_than_zero { static constexpr auto value = (n > 0ul); }; + + static constexpr bool value = + is_valid_v > && + any_of_v ,greater_than_zero> && + any_of_v ,equal_to_one > && + all_of_v ,equal_to_one >; +}; + + +} // namespace detail + +/** @brief Returns true if extents equals (m,[n,1,...,1]) with m>=1||n>=1 && m==1||n==1*/ +template +constexpr inline bool is_vector_v = detail::is_vector_impl_t>::value; + + + +//////////////// IS_MATRIX ///////////////////// + +namespace detail { + +template +struct is_matrix; + +template<> +struct is_matrix> { static constexpr bool value = false; }; + +template +struct is_matrix> { static constexpr bool value = true; }; + +template +struct is_matrix> +{ + template struct equal_to_one { static constexpr auto value = (n == 1ul); }; + template struct greater_than_zero { static constexpr auto value = (n > 0ul); }; + + static constexpr bool value = + is_valid_v > && + all_of_v ,greater_than_zero > && + all_of_v ,equal_to_one >; +}; + + +} // namespace detail + +/** @brief Returns true if (m,n,[1,...,1]) with m>=1 or n>=1 */ +template +constexpr inline bool is_matrix_v = detail::is_matrix>::value; + + +//////////////// IS_TENSOR ///////////////////// + +namespace detail { + +template +struct is_tensor; + +template<> +struct is_tensor> { static constexpr bool value = false; }; + +template +struct is_tensor> { static constexpr bool value = false; }; + +template +struct is_tensor> +{ + template + struct greater_than_one { static constexpr auto value = (n > 1ul); }; + + static constexpr bool value = + is_valid_v > && + size_v > > 2ul && + any_of_v ,greater_than_one >; +}; + + +} // namespace detail + +/** @brief Returns true if extents is equal to (m,n,[1,...,1],k,[1,...,1]) with k > 1 */ +template +constexpr inline bool is_tensor_v = 
detail::is_tensor>::value; + + +//////////////// ARRAY_CONVERSION ///////////////////// + +namespace detail { +template +struct to_array_impl_t; + +template +struct to_array_impl_t> +{ static constexpr auto value = std::array{is... }; }; + +template +struct to_array_impl_t> +{ static constexpr auto value = std::array{is... }; }; + +} // namespace detail + +template +constexpr inline auto to_array_v = detail::to_array_impl_t>::value; + + + + +namespace detail { + +template +struct to_strides_impl_t; + +template +struct to_strides_impl_t > +{ + static_assert (is_valid_v); + + static constexpr bool is_first_order = std::is_same_v; + using adjusted_extents = std::conditional_t>; + + template + static constexpr std::size_t selected_product = product_v>>; + + using pre_type = extents <1,( selected_product ) ... >; + using type = std::conditional_t>; +}; + +} // namespace detail + +template +using to_strides_impl_t = typename detail::to_strides_impl_t-1>>::type; + +template +constexpr inline auto to_strides_v = to_array_v,L>>; + +} //namespace boost::numeric::ublas + + +template < + std::size_t l1, + std::size_t l2, + std::size_t r1, + std::size_t r2, + std::size_t ... l, + std::size_t ... r> +[[nodiscard]] inline constexpr bool operator==( + boost::numeric::ublas::extents /*unused*/, + boost::numeric::ublas::extents /*unused*/) +{ + return std::is_same_v< + boost::numeric::ublas::extents, + boost::numeric::ublas::extents>; +} + +template < + std::size_t l1, + std::size_t l2, + std::size_t r1, + std::size_t r2, + std::size_t ... l, + std::size_t ... 
r> +[[nodiscard]] inline constexpr bool operator!=( + boost::numeric::ublas::extents el, + boost::numeric::ublas::extents er) +{ + return !(el == er); +} + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_FUNCTIONS_HPP + + diff --git a/include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp b/include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp new file mode 100644 index 000000000..f274dfafc --- /dev/null +++ b/include/boost/numeric/ublas/tensor/extents/extents_static_size.hpp @@ -0,0 +1,148 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_SIZE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_STATIC_SIZE_HPP + +#include +#include +#include +#include +#include + + +#include "extents_functions.hpp" +#include "extents_base.hpp" + +#include "../layout.hpp" +#include "../concepts.hpp" + + +namespace boost::numeric::ublas +{ + +/** @brief Class template for storing static-number of extents + * + * @code auto e = extents<3>{3,2,4}; @endcode + * + * @tparam N number of extents + * + */ +template +class extents_core : public extents_base> +{ +public: + using base_type = std::array; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + constexpr extents_core() = default; + + constexpr 
explicit extents_core(base_type data) + : _base(std::move(data)) + { + if ( !ublas::is_valid(*this) ){ + throw std::invalid_argument("in boost::numeric::ublas::extents : " + "could not intanstiate extents as provided extents are not valid."); + } + } + + + constexpr extents_core(std::initializer_list const& li) + : _base() + { + if( li.size() != ublas::size(*this) ){ + throw std::length_error("in boost::numeric::ublas::extents : " + "could not intanstiate extents as number of indices exceed N."); + } + + std::copy(li.begin(), li.end(), _base.begin()); + + if ( !ublas::is_valid(*this) ){ + throw std::invalid_argument("in boost::numeric::ublas::extents : " + "could not intanstiate extents as provided extents are not valid."); + } + } + + constexpr extents_core(const_iterator begin, const_iterator end) + { + if( std::distance(begin,end) < 0 || static_cast(std::distance(begin,end)) > this->base().size()){ + throw std::out_of_range("in boost::numeric::ublas::extents : " + "initializer list size is greater than the rank"); + } + + std::copy(begin, end, _base.begin()); + + if ( !ublas::is_valid(*this) ) { + throw std::invalid_argument("in boost::numeric::ublas::extents::ctor: " + "could not intanstiate extents as provided extents are not valid."); + } + } + constexpr extents_core(extents_core const& other) + : _base(other._base) + { + assert(ublas::is_valid(*this)); + } + + constexpr extents_core(extents_core && other) noexcept + : _base( std::move(other._base) ) + { + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + constexpr extents_core& operator=(extents_core other) + noexcept(std::is_nothrow_swappable_v) + { + swap(*this,other); + return *this; + } + + ~extents_core() = default; + + + friend void swap(extents_core& lhs, extents_core& rhs) + noexcept(std::is_nothrow_swappable_v) + { + std::swap(lhs._base, rhs._base); + } + + [[nodiscard]] inline constexpr const_reference at (size_type k) const { return 
this->_base.at(k); } + [[nodiscard]] inline constexpr const_reference operator[](size_type k) const { return this->_base[k]; } + [[nodiscard]] inline constexpr auto const& base () const noexcept { return this->_base; } + [[nodiscard]] inline constexpr const_pointer data () const noexcept { return this->_base.data(); } + + +private: + base_type _base{}; +}; + +} // namespace boost::numeric::ublas + + + + +namespace boost::numeric::ublas{ +template struct is_extents < extents_core > : std::true_type {}; +template struct is_dynamic < extents_core > : std::true_type {}; +template struct is_static_rank < extents_core > : std::true_type {}; +} // namespace boost::numeric::ublas + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents_functions.hpp deleted file mode 100644 index 46f15f93a..000000000 --- a/include/boost/numeric/ublas/tensor/extents_functions.hpp +++ /dev/null @@ -1,449 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ - -#include -#include -#include -#include -#include -#include - -namespace boost::numeric::ublas::detail{ - - template - constexpr auto push_back(basic_static_extents) -> basic_static_extents; - - template - constexpr auto push_front(basic_static_extents) -> basic_static_extents; - - template - constexpr auto pop_front(basic_static_extents) -> basic_static_extents; - - template - constexpr auto any_extents_greater_than_one([[maybe_unused]] basic_static_extents const& e) noexcept{ - constexpr auto sz = sizeof...(Es); - return sz && ( ( Es > T(1) ) || ... ); - } - - template - constexpr auto squeeze_impl_remove_one( - [[maybe_unused]] basic_static_extents e, - basic_static_extents num = basic_static_extents{} - ){ - // executed when basic_static_extents is size of 1 - // @code basic_static_extents @endcode - if constexpr( sizeof...(E) == 0ul ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - if constexpr( E0 == T(1) ){ - return num; - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - return decltype(push_back(num)){}; - } - }else{ - if constexpr( E0 == T(1) ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - return squeeze_impl_remove_one(basic_static_extents{}, num); - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - auto n_num_list = decltype(push_back(num)){}; - return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); - } - } - } - - template - constexpr auto squeeze_impl( basic_static_extents const& e ){ - - using extents_type = basic_static_extents; - - if constexpr( 
extents_type::_size <= typename extents_type::size_type(2) ){ - return e; - } - - using value_type = typename extents_type::value_type; - using size_type = typename extents_type::size_type; - - auto one_free_static_extents = squeeze_impl_remove_one(e); - - // check after removing 1s from the list are they same - // if same that means 1s does not exist and no need to - // squeeze - if constexpr( decltype(one_free_static_extents)::_size != extents_type::_size ){ - - // after squeezing, all the extents are 1s we need to - // return extents of (1, 1) - if constexpr( decltype(one_free_static_extents)::_size == size_type(0) ){ - - return basic_static_extents{}; - - }else if constexpr( decltype(one_free_static_extents)::_size == (1) ){ - // to comply with GNU Octave this check is made - // if position 2 contains 1 we push at back - // else we push at front - if constexpr( extents_type::at(1) == value_type(1) ){ - return decltype( push_back(one_free_static_extents) ){}; - }else{ - return decltype( push_front(one_free_static_extents) ){}; - } - - }else{ - return one_free_static_extents; - } - - }else{ - return e; - } - - } - - template - inline - constexpr auto squeeze_impl( basic_extents const& e ){ - using extents_type = basic_extents; - using base_type = typename extents_type::base_type; - using value_type = typename extents_type::value_type; - using size_type = typename extents_type::size_type; - - if( e.size() <= size_type(2) ){ - return e; - } - - auto not_one = [](auto const& el){ - return el != value_type(1); - }; - - // count non one values - size_type size = std::count_if(e.begin(), e.end(), not_one); - - // reserve space - base_type n_extents( std::max(size, size_type(2)), 1 ); - - // copying non 1s to the new extents - std::copy_if(e.begin(), e.end(), n_extents.begin(), not_one); - - // checking if extents size goes blow 2 - // if size of extents goes to 1 - // complying with GNU Octave - // if position 2 contains 1 we - // swap the pos - if( size < size_type(2) 
&& e[1] != value_type(1) ){ - std::swap(n_extents[0], n_extents[1]); - } - - return extents_type(n_extents); - } - - template - inline - auto squeeze_impl( basic_fixed_rank_extents const& e ){ - if constexpr( N <= 2 ){ - return e; - }else{ - return squeeze_impl(basic_extents(e)); - } - } - - - -} // namespace boost::numeric::ublas::detail - -namespace boost::numeric::ublas { - -/** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ -template -[[nodiscard]] inline -constexpr bool is_valid(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_valid() : invalid type, type should be an extents"); - - auto greater_than_zero = [](auto const& a){ return a > 0u; }; - - if( e.size() == 1u ) return e[0] == 1u; - return !e.empty() && std::all_of(e.begin(), e.end(), greater_than_zero ); -} - -/** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ -template -[[nodiscard]] inline -constexpr bool is_valid( [[maybe_unused]] basic_static_extents const &e) noexcept { - constexpr auto sz = sizeof...(Es); - /// if number of extents is 1 then extents at 0th pos should be 1 - /// else if number of extents is greater than 1 then all the extents - /// should be greater than 0 - /// else return false - return ( ( sz == 1ul ) && ( ( T(1) == Es ) && ... ) ) || - ( ( sz > 1ul ) && ( ( T(0) < Es ) && ... 
) ); -} - -/** - * @code static_extents<4,1,2,3,4> s; - * std::cout< -[[nodiscard]] inline -std::string to_string(T const &e) { - - using value_type = typename T::value_type; - - static_assert(is_extents_v ||is_strides_v, - "boost::numeric::ublas::to_string() : invalid type, type should be an extents or a strides"); - - if ( e.empty() ) return "[]"; - - std::stringstream ss; - - ss << "[ "; - - std::copy( e.begin(), e.end() - 1, std::ostream_iterator(ss,", ") ); - - ss << e.back() << " ]"; - - return ss.str(); -} - -/** @brief Returns true if this has a scalar shape - * - * @returns true if (1,1,[1,...,1]) - */ -template -[[nodiscard]] inline -constexpr bool is_scalar(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_scalar() : invalid type, type should be an extents"); - - auto equal_one = [](auto const &a) { return a == 1u; }; - - return !e.empty() && std::all_of(e.begin(), e.end(), equal_one); -} - -/** @brief Returns true if this has a scalar shape - * - * @returns true if (1,1,[1,...,1]) - */ -template -[[nodiscard]] inline -constexpr bool is_scalar( [[maybe_unused]] basic_static_extents const &e) noexcept { - constexpr auto sz = sizeof...(Es); - /// if number of extents is greater than 1 then all the extents should be 1 - /// else return false; - return sz && ( ( T(1) == Es ) && ... 
); -} - -/** @brief Returns true if this has a vector shape - * - * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_vector(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_vector() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; - - if (e.empty()) return false; - if (e.size() == 1u) return e[0] > 1u; - return std::any_of(e.begin(), e.begin() + 2, greater_one) && - std::any_of(e.begin(), e.begin() + 2, equal_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); - -} - -/** @brief Returns true if this has a vector shape - * - * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_vector( [[maybe_unused]] basic_static_extents const &e) noexcept { - using extents_type = basic_static_extents; - - if constexpr (sizeof... (Es) == 1ul) return extents_type::at(0) > T(1); - else if constexpr (sizeof... 
(Es) >= 2ul){ - /// first two elements of the extents cannot be greater than 1 at the - /// same time which xor operation keeps in check - /// example: 0 xor 1 => 1, 1 xor 1 => 0, 1 xor 0 => 1, and 0 xor 0 => 0 - constexpr bool first_two_extents = ( extents_type::at(0) > T(1) ) ^ ( extents_type::at(1) > T(1) ); - - /// poping first two elements from the extents and checking is_scalar - /// basic_static_extents after poping two times becomes - /// basic_static_extents - using extents_after_removing_the_first_element = decltype( detail::pop_front( e ) ); - using extents_after_removing_the_second_element = decltype( detail::pop_front( extents_after_removing_the_first_element{} ) ); - return first_two_extents && - ( extents_after_removing_the_second_element::_size == 0ul || - is_scalar(extents_after_removing_the_second_element{}) - ); - } else return false; -} - -/** @brief Returns true if this has a matrix shape - * - * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_matrix(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_matrix() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; - - return ( e.size() >= 2u ) && std::all_of(e.begin(), e.begin() + 2, greater_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); -} - -/** @brief Returns true if this has a matrix shape - * - * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 - */ -template -[[nodiscard]] inline -constexpr bool is_matrix( [[maybe_unused]] basic_static_extents const &e) noexcept { - using extents_type = basic_static_extents; - - if constexpr (sizeof... 
(Es) >= 2ul){ - /// first two elements of the extents should be greater than 1 at the - /// same time and remaing range should be scalar or empty - constexpr bool first_two_extents = ( extents_type::at(0) > T(1) ) && ( extents_type::at(1) > T(1) ); - - /// poping first two elements from the extents and checking is_scalar - /// basic_static_extents after poping two times becomes - /// basic_static_extents - using extents_after_removing_the_first_element = decltype( detail::pop_front( e ) ); - using extents_after_removing_the_second_element = decltype( detail::pop_front( extents_after_removing_the_first_element{} ) ); - return first_two_extents && - ( extents_after_removing_the_second_element::_size == 0ul || - is_scalar(extents_after_removing_the_second_element{}) - ); - } else return false; -} - -/** @brief Returns true if this is has a tensor shape - * - * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() - */ -template -[[nodiscard]] inline -constexpr bool is_tensor(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_tensor() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u;}; - - return ( e.size() >= 3u ) && std::any_of(e.begin() + 2, e.end(), greater_one); -} - -/** @brief Returns true if this is has a tensor shape - * - * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() - */ -template -[[nodiscard]] inline -constexpr bool is_tensor( [[maybe_unused]] basic_static_extents const &e) noexcept { - if constexpr( sizeof...(Es) >= 3ul ){ - /// poping first two elements from the extents and checking the remaining - /// extent, if any extent is greater than 1 - /// basic_static_extents after poping two times becomes - /// basic_static_extents - using extents_after_removing_the_first_element = decltype( detail::pop_front( e ) ); - using extents_after_removing_the_second_element = decltype( detail::pop_front( 
extents_after_removing_the_first_element{} ) ); - return detail::any_extents_greater_than_one(extents_after_removing_the_second_element{}); - - } else return false; -} - -/** @brief Eliminates singleton dimensions when size > 2 - * - * squeeze { 1,1} -> { 1,1} - * squeeze { 2,1} -> { 2,1} - * squeeze { 1,2} -> { 1,2} - * - * squeeze {1,2,3} -> { 2,3} - * squeeze {2,1,3} -> { 2,3} - * squeeze {1,3,1} -> { 1,3} - * - * @returns basic_extents with squeezed extents - */ -template -[[nodiscard]] inline -auto squeeze(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::squeeze() : invalid type, type should be an extents"); - - return detail::squeeze_impl(e); -} - -/** @brief Returns the product of extents */ -template -[[nodiscard]] inline -constexpr auto product(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::product() : invalid type, type should be an extents"); - - if ( e.empty() ) return 0u; - return std::accumulate(e.begin(), e.end(), 1u, std::multiplies<>()) ; -} - -/** @brief Returns the product of static extents at compile-time */ -template -[[nodiscard]] inline -constexpr auto product( [[maybe_unused]] basic_static_extents const &e) noexcept { - if constexpr( sizeof...(Es) == 0 ) return T(0); - else return T( (Es * ...) 
); -} - - -template && is_extents_v - , int> = 0 -> -[[nodiscard]] inline -constexpr bool operator==(LExtents const& lhs, RExtents const& rhs) noexcept{ - - static_assert( std::is_same_v, - "boost::numeric::ublas::operator==(LExtents, RExtents) : LHS value type should be same as RHS value type"); - - return ( lhs.size() == rhs.size() ) && std::equal(lhs.begin(), lhs.end(), rhs.begin()); -} - -template && is_extents_v - , int> = 0 -> -[[nodiscard]] inline -constexpr bool operator!=(LExtents const& lhs, RExtents const& rhs) noexcept{ - - static_assert( std::is_same_v, - "boost::numeric::ublas::operator!=(LExtents, RExtents) : LHS value type should be same as RHS value type"); - - return !( lhs == rhs ); -} - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp b/include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp deleted file mode 100644 index a3aa3603d..000000000 --- a/include/boost/numeric/ublas/tensor/fixed_rank_extents.hpp +++ /dev/null @@ -1,248 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_FIXED_RANK_EXTENTS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_FIXED_RANK_EXTENTS_HPP_ - -#include -#include -#include -#include -#include -#include - -namespace boost::numeric::ublas { - -/** @brief Template class for storing tensor extents for compile time. 
- * - * @code basic_static_extents<1,2,3,4> t @endcode - * @tparam E parameter pack of extents - * - */ -template -class basic_fixed_rank_extents -{ - -public: - - static constexpr std::size_t const _size = N; - - using base_type = std::array; - using value_type = typename base_type::value_type; - using size_type = typename base_type::size_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - static_assert( std::numeric_limits::is_integer, "Static error in basic_fixed_rank_extents: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, "Static error in basic_fixed_rank_extents: type must be of type unsigned integer."); - - //@returns the rank of basic_static_extents - [[nodiscard]] - static constexpr size_type size() noexcept { return _size; } - - [[nodiscard]] inline - constexpr const_reference at(size_type k) const{ - return _base.at(k); - } - - [[nodiscard]] inline - constexpr reference at(size_type k){ - return _base.at(k); - } - - [[nodiscard]] inline - constexpr const_reference operator[](size_type k) const noexcept{ - return _base[k]; - } - - [[nodiscard]] inline - constexpr reference operator[](size_type k) noexcept{ - return _base[k]; - } - - constexpr basic_fixed_rank_extents() = default; - - constexpr basic_fixed_rank_extents(basic_fixed_rank_extents const& other) - : _base(other._base) - {} - - constexpr basic_fixed_rank_extents(basic_fixed_rank_extents && other) noexcept - : _base( std::move(other._base) ) - {} - - constexpr basic_fixed_rank_extents& operator=(basic_fixed_rank_extents const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_fixed_rank_extents temp(other); - swap(*this,temp); - return *this; - } - - constexpr basic_fixed_rank_extents& 
operator=(basic_fixed_rank_extents && other) - noexcept(std::is_nothrow_swappable_v) - { - swap(*this,other); - return *this; - } - - ~basic_fixed_rank_extents() = default; - - constexpr basic_fixed_rank_extents(std::initializer_list li){ - if( li.size() > _size ){ - throw std::out_of_range("boost::numeric::ublas::basic_fixed_rank_extents(std::initializer_list): " - "number of elements in std::initializer_list is greater than the size" - ); - } - - std::copy(li.begin(), li.end(), _base.begin()); - - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents() : " - "shape tuple is not a valid permutation: has zero elements." - ); - } - } - - constexpr basic_fixed_rank_extents(const_iterator begin, const_iterator end){ - if( std::distance(begin,end) < 0 || static_cast(std::distance(begin,end)) > _size){ - throw std::out_of_range("boost::numeric::ublas::basic_fixed_rank_extents(): initializer list size is greater than the rank"); - } - - std::copy(begin, end, _base.begin()); - - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(const_iterator,const_iterator) : " - "shape tuple is not a valid permutation: has zero elements." 
- ); - } - } - - inline - constexpr void fill( value_type value ){ - _base.fill(value); - } - - template - constexpr basic_fixed_rank_extents(OtherExtents const& e){ - static_assert( is_extents_v, "boost::numeric::ublas::basic_fixed_rank_extents(OtherExtents const&) : " - "OtherExtents should be a valid tensor extents" - ); - - if constexpr( is_static_rank_v< OtherExtents > ){ - static_assert( OtherExtents::_size == _size, - "basic_fixed_rank_extents::basic_fixed_rank_extents(OtherExtents const&) : " - "unequal rank found, rank should be equal" - ); - }else{ - if( e.size() != size() ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(OtherExtents const&) : " - "unequal rank found, rank should be equal" - ); - } - } - - std::copy_n(e.begin(),_size, _base.begin()); - } - - constexpr basic_fixed_rank_extents(base_type const& data) - : _base(data) - { - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(base_type const&) : " - "shape tuple is not a valid permutation: has zero elements." - ); - } - } - - constexpr basic_fixed_rank_extents(base_type&& data) - : _base(std::move(data)) - { - if ( !is_valid(*this) ){ - throw std::length_error("Error in basic_fixed_rank_extents::basic_fixed_rank_extents(base_type &&) : " - "shape tuple is not a valid permutation: has zero elements." 
- ); - } - } - - /** @brief Returns the std::vector containing extents */ - [[nodiscard]] inline - constexpr base_type const& base() const noexcept{ - return _base; - } - - /** @brief Checks if extents is empty or not - * - * @returns true if rank is 0 else false - * - */ - [[nodiscard]] inline - constexpr bool empty() const noexcept { return _size == size_type{0}; } - - friend void swap(basic_fixed_rank_extents& lhs, basic_fixed_rank_extents& rhs) - noexcept(std::is_nothrow_swappable_v) - { - std::swap(lhs._base , rhs._base ); - } - - [[nodiscard]] inline - constexpr const_pointer data() const noexcept - { - return _base.data(); - } - - [[nodiscard]] inline - constexpr const_iterator - begin() const noexcept - { - return _base.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator - end() const noexcept - { - return _base.end(); - } - - [[nodiscard]] inline - constexpr const_reference back () const - { - return _base.back(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return _base.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return _base.rend(); - } - -private: - base_type _base{}; -}; - -} // namespace boost::numeric::ublass - - - -#endif diff --git a/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp b/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp index 952f6e29e..011a4c64c 100644 --- a/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp +++ b/include/boost/numeric/ublas/tensor/fixed_rank_strides.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -12,11 +12,17 @@ /// \file strides.hpp Definition for the basic_strides template class -#ifndef _BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP_ -#define _BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP_ -#include -#include +#ifndef BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP +#define BOOST_UBLAS_TENSOR_FIXED_RANK_STRIDES_HPP + +#if 0 + +#include "detail/strides_functions.hpp" +#include "extents/extents_static_size.hpp" +#include "layout.hpp" +#include "strides_base.hpp" + namespace boost::numeric::ublas { @@ -25,126 +31,63 @@ namespace boost::numeric::ublas { * Proxy template class of std::array. * */ -template -class basic_fixed_rank_strides +template +class strides,L> : public strides_base,L>> { public: - static constexpr std::size_t const _size = N; - - using layout_type = L; - using base_type = std::array; - using value_type = typename base_type::value_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - static_assert( std::numeric_limits::is_integer, - "Static error in boost::numeric::ublas::basic_fixed_rank_strides: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, - "Static error in boost::numeric::ublas::basic_fixed_rank_strides: type must be of type unsigned integer."); - static_assert(std::is_same::value || std::is_same::value, - "Static error in boost::numeric::ublas::basic_fixed_rank_strides: layout type must either first or last order"); - - /** @brief Default constructs basic_fixed_rank_strides + using extents_type = extents; + using layout_type = L; + using base_type = typename extents_type::base_type; + using value_type = typename base_type::value_type; + using 
reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using size_type = typename base_type::size_type; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + using const_reverse_iterator = typename base_type::const_reverse_iterator; + + + static_assert(std::is_same::value || + std::is_same::value); + /** @brief Default constructs strides with static size * - * @code auto ex = basic_fixed_rank_strides{}; + * @code auto s = strides>{}; */ - constexpr basic_fixed_rank_strides() noexcept = default; + constexpr strides() noexcept = default; - /** @brief Constructs basic_fixed_rank_strides from basic_extents for the first- and last-order storage formats + /** @brief Constructs strides from extents with static size for the first- and last-order storage formats * - * @code auto strides = basic_fixed_rank_strides( basic_extents{2,3,4} ); + * @code auto s = strides>({2,3,4},layout::first_order{}); * */ - template - constexpr basic_fixed_rank_strides(ExtentsType const& s) + constexpr explicit strides(extents_type const& e) + : _base(compute_strides(e)) { - static_assert( is_extents_v, "boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "ExtentsType is not a tensor extents" - ); - - if constexpr( is_static_rank_v< ExtentsType > ){ - static_assert( ExtentsType::_size == _size, - "boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "ExentsType size should be equal to the size of basic_fixed_rank_strides" - ); - }else{ - if ( s.size() != size() ){ - throw std::length_error( - "boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "ExentsType size should be equal to the size of basic_fixed_rank_strides" - ); - } - } - - _base.fill(value_type(1)); - - if( s.empty() ) - return; - - if( !is_valid(s) ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType 
const&) : " - "shape is not valid." - ); - - if( is_vector(s) || is_scalar(s) ) - return; - - if( this->size() < 2 ) - throw std::runtime_error("Error in boost::numeric::ublas::basic_fixed_rank_strides(ExtentsType const&) : " - "size of strides must be greater or equal 2." - ); - - - if constexpr (std::is_same::value){ - std::transform(s.begin(), s.end() - 1, _base.begin(), _base.begin() + 1, std::multiplies{}); - }else { - std::transform(s.rbegin(), s.rend() - 1, _base.rbegin(), _base.rbegin() + 1, std::multiplies{}); - } - } - - constexpr basic_fixed_rank_strides(basic_fixed_rank_strides const& l) + } + + constexpr strides(strides const& l) : _base(l._base) {} - constexpr basic_fixed_rank_strides(basic_fixed_rank_strides && l ) noexcept + constexpr strides(strides && l ) noexcept : _base(std::move(l._base)) {} - constexpr basic_fixed_rank_strides(base_type const& l ) - : _base(l) - {} + ~strides() = default; - constexpr basic_fixed_rank_strides(base_type && l ) - : _base(std::move(l)) - {} - - ~basic_fixed_rank_strides() = default; - - - basic_fixed_rank_strides& operator=(basic_fixed_rank_strides const& other) - noexcept(std::is_nothrow_swappable_v) - { - basic_fixed_rank_strides temp(other); - swap (*this, temp); - return *this; - } - basic_fixed_rank_strides& operator=(basic_fixed_rank_strides && other) + strides& operator=(strides other) noexcept(std::is_nothrow_swappable_v) { swap (*this, other); return *this; } - friend void swap(basic_fixed_rank_strides& lhs, basic_fixed_rank_strides& rhs) + friend void swap(strides& lhs, strides& rhs) noexcept(std::is_nothrow_swappable_v) { - std::swap(lhs._base , rhs._base); + std::swap(lhs._base,rhs._base); } [[nodiscard]] inline @@ -191,7 +134,7 @@ class basic_fixed_rank_strides constexpr const_iterator end() const noexcept{ return _base.end(); } - + [[nodiscard]] inline constexpr base_type const& base() const noexcept{ return this->_base; @@ -212,10 +155,41 @@ class basic_fixed_rank_strides } private: - base_type 
_base; + base_type _base; + static constexpr std::size_t const _size = N; + + + [[nodiscard]] inline auto compute_strides( extents_type const& e) + { + using base_type = typename extents_type::base_type; + namespace ub = boost::numeric::ublas; + auto init = [](std::index_sequence){ return base_type{is...}; }; + + auto s = init(std::make_index_sequence{}); + + if (std::tuple_size_v == 0UL) + return s; + if (ub::is_vector(e) || ub::is_scalar(e)) + return s; + + if constexpr(std::is_same_v){ + std::transform(ub::begin(e), ub::end(e) - 1, s.begin(), s.begin() + 1, std::multiplies<>{}); + } + else { + std::transform(ub::rbegin(e), ub::rbegin(e) - 1, s.rbegin(), s.rbegin() + 1, std::multiplies<>{}); + } + + return s; + } }; -} // namespace boost::numeric::ublass +template struct is_strides ,L>> : std::true_type {}; +template struct is_dynamic ,L>> : std::true_type {}; +template struct is_static_rank ,L>> : std::true_type {}; + +} // namespace boost::numeric::ublas + +#endif #endif diff --git a/include/boost/numeric/ublas/tensor/function/conj.hpp b/include/boost/numeric/ublas/tensor/function/conj.hpp new file mode 100644 index 000000000..d30d08385 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/conj.hpp @@ -0,0 +1,81 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_CONJ_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_CONJ_HPP + +#include +#include +#include + + +#include "../extents/extents_functions.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; + +template +struct tensor_engine; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ +/** @brief Computes the complex conjugate component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template::value_type>, int > = 0 + > +auto conj(detail::tensor_expression< tensor_core, D > const& expr) +{ + return detail::make_unary_tensor_expression< tensor_core > (expr(), [] (auto const& l) { return std::conj( l ); } ); +} + +/** @brief Computes the complex conjugate component of tensor elements within a tensor expression + * + * @param[in] expr tensor expression + * @returns complex tensor + */ +template +auto conj(detail::tensor_expression const& expr) +{ + using tensor_type = T; + using value_type = typename tensor_type::value_type; + using complex_type = std::complex; + using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using extents_type = typename tensor_type::extents_type; + using return_container_type = typename container_traits::template rebind; + using return_tensor_type = tensor_core>; + + if( ublas::empty( detail::retrieve_extents( expr ) ) ){ + throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty."); + } + + auto a = tensor_type( expr ); + auto c = return_tensor_type( a.extents() ); + + std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::conj(l) ; } ); + + return c; +} + + 
+} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/imag.hpp b/include/boost/numeric/ublas/tensor/function/imag.hpp new file mode 100644 index 000000000..0058cb188 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/imag.hpp @@ -0,0 +1,82 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_IMAG_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_IMAG_HPP + +#include +#include +#include + + +#include "../extents/extents_functions.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; + +template +struct tensor_engine; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Extract the imaginary component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template +auto imag(detail::tensor_expression const& lhs) { + return detail::make_unary_tensor_expression (lhs(), [] (auto const& l) { return std::imag( l ); } ); +} + + +/** @brief Extract the imag component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template::value_type>, int > = 0 + > +auto imag(detail::tensor_expression< tensor_core< TE > ,D> const& expr) +{ + using tensor_type = tensor_core< TE >; + using complex_type = typename tensor_type::value_type; + using value_type = typename complex_type::value_type; + using layout_type = typename tensor_type::layout_type; + using container_type = typename 
tensor_type::container_type; + using extents_type = typename tensor_type::extents_type; + using return_container_type = typename container_traits::template rebind; + + using return_tensor_type = tensor_core>; + + if( ublas::empty( detail::retrieve_extents( expr ) ) ){ + throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); + } + + auto a = tensor_type( expr ); + auto c = return_tensor_type( a.extents() ); + + std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::imag(l) ; } ); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/init.hpp b/include/boost/numeric/ublas/tensor/function/init.hpp new file mode 100644 index 000000000..3a19c3507 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/init.hpp @@ -0,0 +1,120 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_FUNCTIONS_INIT_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_FUNCTIONS_INIT_HPP + +#include "../extents.hpp" +#include "../tensor.hpp" +#include "../concepts.hpp" + +//#include + + +namespace boost::numeric::ublas +{ + +template +struct init +{ + using container = std::vector; + using tensor = tensor_core, L, container>>; + + inline auto operator()(extents<> const& e) const + { + auto p = ublas::product(e); + return tensor(e,container(p,V{n})); + } + + + template + inline auto operator()(Ns ... ns) const + { + auto p = ( std::size_t(1) * ... 
* std::size_t(ns) ); + return tensor(extents<>{std::size_t(ns)...},container(p,V{n})); + } +}; + +template +using ones = init; + +template +using zeros = init; + + + +template +struct init_static_rank +{ + using container = std::vector; + + template + inline auto operator()(extents const& e) const + { + auto p = ublas::product(e); + using tensor = tensor_core, L, container>>; + + return tensor(e, container(p,V{k})); + } + + + template + inline auto operator()(Ns ... ns) const + { + constexpr auto n = sizeof...(ns); + auto p = ( std::size_t(1) * ... * std::size_t(ns) ); + using tensor = tensor_core, L, container>>; + + return tensor(extents{std::size_t(ns)...}, container(p,V{k})); + } +}; + +template +using ones_static_rank = init_static_rank; + +template +using zeros_static_rank = init_static_rank; + + +template +struct init_static +{ + template + struct inner; + + template + struct inner> + { + static constexpr auto n = sizeof...(is); + // NOLINTNEXTLINE(bugprone-integer-division) + static constexpr auto value = std::array{ V(k*(is+1)/(is+1)) ... }; + }; + + template + constexpr inline auto operator()(extents const& /**/) const + { + using extents_type = extents; + constexpr auto p = product_v; + constexpr auto c = inner>::value; + using tensor = tensor_core>>; + return tensor(c); + } +}; + + +template +using ones_static = init_static; + +template +using zeros_static = init_static; + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_FUNCTIONS_ONES_HPP diff --git a/include/boost/numeric/ublas/tensor/function/inner_prod.hpp b/include/boost/numeric/ublas/tensor/function/inner_prod.hpp new file mode 100644 index 000000000..e82e2d10d --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/inner_prod.hpp @@ -0,0 +1,68 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_INNER_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_INNER_HPP + +#include +#include + +#include "../extents.hpp" +#include "../multiplication.hpp" + + +namespace boost::numeric::ublas +{ +template +class tensor_core; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Computes the inner product of two tensors + * + * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,ip]) + * + * @note calls inner function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns a value type. + */ +template +inline decltype(auto) inner_prod(tensor_core< TE1 > const &a, tensor_core< TE2 > const &b) +{ + using value_type = typename tensor_core< TE1 >::value_type; + + static_assert( + std::is_same_v::value_type>, + "error in boost::numeric::ublas::inner_prod(tensor_core< TE1 > const&, tensor_core< TensorEngine2 > const&): " + "Both the tensor should have the same value_type" + ); + + if (a.rank() != b.rank()) + throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both the tensors must be the same."); + + if (a.empty() || b.empty()) + throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty."); + + //if (a.extents() != b.extents()) + if (::operator!=(a.extents(),b.extents())) + throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same."); + + return inner(a.rank(), a.extents().data(), + a.data(), a.strides().data(), + b.data(), b.strides().data(), value_type{0}); +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_INNER_HPP diff --git a/include/boost/numeric/ublas/tensor/function/norm.hpp b/include/boost/numeric/ublas/tensor/function/norm.hpp new file mode 100644 index 000000000..7f4922f49 --- /dev/null +++ 
b/include/boost/numeric/ublas/tensor/function/norm.hpp @@ -0,0 +1,60 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP + +#include +#include +#include + + +#include "../traits/basic_type_traits.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** + * + * @brief Computes the frobenius norm of a tensor + * + * @note Calls accumulate on the tensor. + * + * implements + * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 ) + * + * @tparam V the data type of tensor + * @tparam F the format of tensor storage + * @tparam A the array_type of tensor + * @param a the tensor whose norm is expected of rank p. + * @return the frobenius norm of a tensor.
+ */ +template +inline auto norm(tensor_core< TE > const &a) +{ + using value_type = typename tensor_core< TE >::value_type; + + if (a.empty()) { + throw std::runtime_error("Error in boost::numeric::ublas::norm: tensors should not be empty."); + } + + return std::sqrt(accumulate(a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{}, + [](auto const &l, auto const &r) { return l + r * r; })); +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/outer_prod.hpp b/include/boost/numeric/ublas/tensor/function/outer_prod.hpp new file mode 100644 index 000000000..2adb6fef7 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/outer_prod.hpp @@ -0,0 +1,283 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_OUTER_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_OUTER_HPP + +#include +#include + +#include "../extents.hpp" +#include "../multiplication.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" + + +namespace boost::numeric::ublas +{ + +template +struct tensor_engine; + +template +class tensor_core; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + + +namespace detail{ +/** Enables if E1 or E1 is dynamic extents with static rank + * + * extents< > & extents + * extents & extents< > + * extents< > & extents< > + * + */ +template< + class TEA, + class TEB, + class EA = typename tensor_core::extents_type, + class EB = typename tensor_core::extents_type + > +using enable_outer_if_one_extents_has_dynamic_rank = std::enable_if_t< + ( is_dynamic_rank_v || is_dynamic_rank_v) && + (!is_static_v || !is_static_v ) , bool >; + +} // namespace detail + 
+/** @brief Computes the outer product of two tensors + * + * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] + * + * @note calls outer function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns tensor object C with the same storage format F and allocator type A1 + */ +template = true > +inline auto outer_prod( tensor_core< TEA > const &a, tensor_core< TEB > const &b) +{ + using tensorA = tensor_core< TEA >; + using tensorB = tensor_core< TEB >; + using valueA = typename tensorA::value_type; + using extentsA = typename tensorA::extents_type; + + using valueB = typename tensorB::value_type; + using extentsB = typename tensorB::extents_type; + + using tensorC = std::conditional_t < is_dynamic_rank_v, tensorA, tensorB>; +// using valueC = typename tensorC::value_type; + using extentsC = typename tensorC::extents_type; + + static_assert( std::is_same_v ); + static_assert( is_dynamic_rank_v || is_dynamic_rank_v); + + if (a.empty() || b.empty()){ + throw std::runtime_error("Error in boost::numeric::ublas::outer_prod: tensors should not be empty."); + } + + auto const& na = a.extents(); + auto const& nb = b.extents(); + + auto nc_base = typename extentsC::base_type(ublas::size(na)+ublas::size(nb)); + auto nci = std::copy(ublas::begin(na),ublas::end(na), std::begin(nc_base)); + std::copy(ublas::begin(nb),ublas::end(nb), nci); + auto nc = extentsC(nc_base); + + auto c = tensorC( nc, valueA{} ); + + outer(c.data(), c.rank(), nc.data(), c.strides().data(), + a.data(), a.rank(), na.data(), a.strides().data(), + b.data(), b.rank(), nb.data(), b.strides().data()); + + return c; +} + + +namespace detail{ +/** Enables if extents E1, E1 + * + * both are dynamic extents with static rank + * + * extents & extents + * + */ +template< + class TEA, + class TEB, + class E1 = typename tensor_core::extents_type, + class E2 = typename tensor_core::extents_type + > +using enable_outer_if_both_extents_have_static_rank = 
std::enable_if_t< + ( is_static_rank_v && is_dynamic_v) && + ( is_static_rank_v && is_dynamic_v) , bool >; +} // namespace detail + +/** @brief Computes the outer product of two tensors + * + * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] + * + * @note calls outer function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns tensor object C with the same storage format F and allocator type A1 + */ +template = true > +inline auto outer_prod(tensor_core const &a, tensor_core const &b) +{ + using tensorA = tensor_core; + using valueA = typename tensorA::value_type; + using layoutA = typename tensorA::layout_type; + using extentsA = typename tensorA::extents_type; + using containerA = typename tensorA::container_type; + using resizableA_tag = typename tensorA::resizable_tag; + + using tensorB = tensor_core; + using valueB = typename tensorB::value_type; +// using layoutB = typename tensorB::layout_type; + using extentsB = typename tensorB::extents_type; + using resizableB_tag = typename tensorB::resizable_tag; + + static_assert(std::is_same_v); + static_assert(is_static_rank_v || is_static_rank_v); + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + + if (a.empty() || b.empty()) + throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty."); + + auto const& na = a.extents(); + auto const& nb = b.extents(); + + constexpr auto sizeA = std::tuple_size_v; + constexpr auto sizeB = std::tuple_size_v; + + using extentsC = extents; + using tensorC = tensor_core>; + + auto nc_base = typename extentsC::base_type{}; + auto nci = std::copy(ublas::begin(na), ublas::end(na), std::begin(nc_base)); + std::copy(ublas::begin(nb),ublas::end(nb), nci); + auto nc = extentsC( nc_base ); + + auto c = tensorC( nc ); + + outer(c.data(), c.rank(), nc.data(), c.strides().data(), + a.data(), a.rank(), na.data(), a.strides().data(), + b.data(), b.rank(), nb.data(), 
b.strides().data()); + + return c; +} + + +namespace detail { + +// concat two static_stride_list togather +// @code using type = typename concat< static_stride_list, static_stride_list >::type @endcode +template +struct concat; + +template +struct concat< basic_static_extents, basic_static_extents > { + using type = basic_static_extents; +}; + +template +using concat_t = typename concat::type; + +} // namespace detail + +namespace detail { +/** Enables if extents E1, E1 + * + * both are dynamic extents with static rank + * + * extents & extents + * + */ +template< + class TEA, + class TEB, + class E1 = typename tensor_core::extents_type, + class E2 = typename tensor_core::extents_type + > +using enable_outer_if_both_extents_are_static = std::enable_if_t< + ( is_static_v && is_static_v) , bool>; + +} // namespace detail +/** @brief Computes the outer product of two tensors + * + * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] + * + * @note calls outer function + * + * @param[in] a tensor object A + * @param[in] b tensor object B + * + * @returns tensor object C with the same storage format F and allocator type A1 + */ +template = true > +inline decltype(auto) outer_prod(tensor_core const &a, tensor_core const &b) +{ + using tensorA = tensor_core; + using valueA = typename tensorA::value_type; + using layoutA = typename tensorA::layout_type; + using extentsA = typename tensorA::extents_type; + using arrayA = typename tensorA::array_type; +// using resizableA_tag = typename tensorA::resizable_tag; + + using tensorB = tensor_core; + using valueB = typename tensorB::value_type; +// using layoutB = typename tensorB::layout_type; + using extentsB = typename tensorB::extents_type; +// using resizableB_tag = typename tensorB::resizable_tag; + + using extentsC = ublas::cat_t;// detail::concat_t; + using layoutC = layoutA; + using valueC = valueA; + using storageC = rebind_storage_size_t; + using tensorC = tensor_core>; + + 
static_assert(std::is_same_v); + static_assert(is_static_v || is_static_v); + + constexpr auto extentsA_size = std::tuple_size_v; + constexpr auto extentsB_size = std::tuple_size_v; + + + if (a.empty() || b.empty()) + throw std::runtime_error("error in boost::numeric::ublas::outer_prod: tensors should not be empty."); + + auto nc = extentsC{}; + + auto const& na = a.extents(); + auto const& nb = b.extents(); + + auto c = tensorC(valueC{}); + + outer(c.data(), c.rank(), data(nc), c.getStrides().data(), + a.data(), a.rank(), data(na), a.getStrides().data(), + b.data(), b.rank(), data(nb), b.getStrides().data()); + + return c; +} + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/real.hpp b/include/boost/numeric/ublas/tensor/function/real.hpp new file mode 100644 index 000000000..302637776 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/real.hpp @@ -0,0 +1,80 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_REAL_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_REAL_HPP + +#include +#include +#include + + +#include "../extents/extents_functions.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" + +namespace boost::numeric::ublas +{ +template +class tensor_core; + +template +struct tensor_engine; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Extract the real component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template +auto real(detail::tensor_expression const& expr) { + return detail::make_unary_tensor_expression (expr(), [] (auto const& l) { return std::real( l ); } ); +} + +/** @brief Extract the real component of tensor elements within a tensor expression + * + * @param[in] lhs tensor expression + * @returns unary tensor expression + */ +template::value_type>, int > = 0 + > +auto real(detail::tensor_expression< tensor_core< TE > ,D > const& expr) +{ + + using tensor_type = tensor_core< TE >; + using complex_type = typename tensor_type::value_type; + using value_type = typename complex_type::value_type; + using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using extents_type = typename tensor_type::extents_type; + using storage_type = typename container_traits::template rebind; + using return_tensor_engine = tensor_engine; + using return_tensor_type = tensor_core; + + if( ublas::empty ( detail::retrieve_extents( expr ) ) ) + throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); + + auto a = tensor_type( expr ); + auto c = return_tensor_type( a.extents() ); + + std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return 
std::real(l) ; } ); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/function/reshape.hpp b/include/boost/numeric/ublas/tensor/function/reshape.hpp new file mode 100644 index 000000000..d27e59446 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/reshape.hpp @@ -0,0 +1,87 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_RESHAPE_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_RESHAPE_HPP + +#include "../extents.hpp" +#include "../tensor.hpp" + +namespace boost::numeric::ublas{ + +/** Enables prod(ttt) if E1 or E1 is dynamic extents with static rank + * + * extents< > & extents + * extents & extents< > + * extents< > & extents< > + * + */ +template< + class TE, + class E = typename tensor_core< TE >::extents_type + > +using enable_reshape_if_shape_is_dynamic = std::enable_if_t< is_dynamic_v , bool >; + +/** @brief Reshapes the basic_tensor + * + * + * (1) @code auto b = a.reshape(extents{m,n,o}); @endcode or + * (2) @code auto b = a.reshape(extents{m,n,o},4); @endcode + * + * If the size of this smaller than the specified extents than + * default constructed (1) or specified (2) value is appended. + * + * @note rank of the basic_tensor might also change. + * + * @param e extents with which the basic_tensor is reshaped. + * @param v value which is appended if the basic_tensor is enlarged. 
+ */ +template< class E, class D, + enable_reshape_if_shape_is_dynamic = true> +[[nodiscard]] constexpr auto reshape (tensor_core const& t, extents_base const& e, [[maybe_unused]] typename tensor_core::value_type v = {}) +{ + using from_engine_type = E; + using from_tensor_type = tensor_core; +// using from_extents_type = typename from_tensor_type::extents_type; + using from_container_type = typename from_tensor_type::container_type; + using from_layout_type = typename from_tensor_type::layout_type; + + using to_extents_type = D; + using to_engine_type = tensor_engine; + using to_tensor_type = tensor_core; + + auto const& efrom = t.extents(); + auto const& eto = e(); + + if( ::operator==(efrom,eto) ) + return t; + + auto const to_size = product(eto); + auto const from_size = product(efrom); + + + auto r = to_tensor_type(eto); + const auto m = std::min(to_size,from_size); + std::copy(t.begin() , t.begin()+m ,r.begin() ); + + if(m < to_size){ + const auto n = to_size - m; + std::fill_n(r.begin()+m,n,v); + } + + return r; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_COMMON_HPP diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp new file mode 100644 index 000000000..9e5c9acff --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_matrix.hpp @@ -0,0 +1,256 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TTM_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TTM_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../tensor.hpp" + + +/** @brief Computes the m-mode tensor-times-matrix product + * + * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] + * + * @note calls ublas::ttm + * + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * @param[in] m contraction dimension with 1 <= m <= p + * + * @returns tensor object C with order p, the same storage format and allocator type as A + */ + +//namespace boost::numeric::ublas +//{ + +//template +//struct tensor_engine; + +//template +//class tensor_core; + +//template +//class matrix; + +//} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +namespace detail { +template< class TE, class E = typename tensor_core::extents_type > +using enable_ttm_if_extent_is_modifiable = std::enable_if_t, bool>; +} // namespace detail + +template ::value_type, + typename L = typename tensor_core::layout_type, + detail::enable_ttm_if_extent_is_modifiable = true > +inline decltype(auto) prod( tensor_core< TE > const &a, matrix const &b, const std::size_t m) +{ + using tensor_type = tensor_core< TE >; + using extents_type = typename tensor_type::extents_type; + using layout_type = typename tensor_type::layout_type; + using resizeable_tag = typename tensor_type::resizable_tag; + + static_assert(std::is_same_v ); + static_assert(is_dynamic_v); + + auto const p = a.rank(); + auto const& na = a.extents(); + auto nb = extents<2>{std::size_t(b.size1()), std::size_t(b.size2())}; + + assert( p != 0 ); + assert( p == ublas::size(na)); + + if( m == 0 ) throw std::length_error("Error in boost::numeric::ublas::ttm: contraction mode must be greater than zero."); + if( p < m ) 
throw std::length_error("Error in boost::numeric::ublas::ttm: tensor order must be greater than or equal to the specified mode."); + if(na[m-1]!=nb[1]) throw std::invalid_argument("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must be equal."); + + + auto nc_base = na.base(); + auto wb = ublas::to_strides(nb,layout_type{}); + nc_base[m-1] = nb[0]; + auto nc = extents_type(nc_base); + auto c = tensor_type(nc); + + assert( std::equal(begin(na) , begin(na)+m-1, begin(nc) ) ); + assert( std::equal(begin(na)+m, end (na), begin(nc)+m) ); + assert( nc[m-1] == nb[0] ); + + auto const* bb = &(b(0, 0)); + ttm(m, p, + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + bb, nb.data(), wb.data()); + + return c; +} + + +/** @brief Computes the m-mode tensor-times-matrix product + * + * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] + * + * @note calls ublas::ttm + * + * @tparam M contraction dimension with 1 <= M <= p + * @tparam N is a non contracting dimension + * @tparam TE TensorEngine is used for the tensor + * + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p, the same storage format and allocator type as A + */ + +namespace detail { +template< class TE, class E = typename tensor_core< TE >::extents_type > +using enable_ttm_if_extent_is_not_resizable = + std::enable_if_t && is_dynamic_v, bool>; +} // namespace detail + +template ::value_type, + typename L = typename tensor_core< TE >::layout_type, + detail::enable_ttm_if_extent_is_not_resizable> +inline decltype(auto) prod(tensor_core const &a, matrix const &b) +{ + using tensor_type = tensor_core; + using extents_type = typename tensor_type::extents_type; + using layout_type = typename tensor_type::layout_type; + using resizeable_tag = typename tensor_type::resizable_tag; + + static_assert(std::is_same_v ); + static_assert(is_dynamic_v); + + 
constexpr auto p = std::tuple_size_v; + + auto const& na = a.extents(); + auto nb = extents<2>{std::size_t(b.size1()), std::size_t(b.size2())}; + + static_assert( p != 0 ); + static_assert( p == a.rank()); + static_assert( m != 0); + static_assert( p < m); + + if(na[m-1]!=nb[1]) throw std::invalid_argument("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must be equal."); + + auto nc_base = na.base(); + auto wb = ublas::to_strides(nb,layout_type{}); + + std::get(nc_base) = std::get<0>(nb.base()); + + auto nc = extents_type(nc_base); + auto c = tensor_type(nc); + + assert(std::equal(begin(na) , begin(na)+m-1, begin(nc) )); + assert(std::equal(begin(na)+m, end (na), begin(nc)+m)); + assert(nc[m-1] == nb[0]); + + auto bbdata = &(b(0, 0)); + + auto const& wa = a.strides(); + auto const& wc = c.strides(); + + ttm(m, p, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + bbdata , nb.data(), wb.data()); + + return c; +} + + +//namespace detail { +//template< +// class TEL, +// class TER, +// class EL = typename TEL::extents_type, +// class ER = typename TER::extents_type +// > +//using enable_ttm_if_extent_is_static = +// std::enable_if_t && is_static_v, bool>; +//} // namespace detail + +//template +//inline decltype(auto) prod( tensor_core const& a, tensor_core const &b) +//{ +// using tensorA = tensor_core; +// using extentsA = typename tensorA::extents_type; +// using layout = typename tensorA::layout_type; +// using resizeable_tag = typename tensorA::resizable_tag; + +// static_assert(std::is_same_v ); +// static_assert(is_static_v); + +// constexpr auto p = size_v; + + +// auto const& na = a.extents(); +// auto const& nb = b.extents(); + +// static_assert( p != 0 ); +// static_assert( p == a.rank()); +// static_assert( m != 0); +// static_assert( p < m); + +// static_assert(get_v != get_v); + +// if(na[m-1]!=nb[1]) throw std::invalid_argument("Error in boost::numeric::ublas::ttm: 2nd extent of B and m-th extent of A must 
be equal."); + +// auto nc_base = na.base(); +// auto wb = ublas::to_strides(nb,layout{}); + +// std::get(nc_base) = std::get<0>(nb.base()); + +// auto nc = extents_type(nc_base); +// auto c = tensor_type(nc); + +// assert(std::equal(na.begin() , na.begin()+m-1, nc.begin())); +// assert(std::equal(na.begin()+m, na.end, nc.begin())); +// assert(nc[m-1] == nb[0]); + +// auto bbdata = &(b(0, 0)); + +// auto const& wa = a.strides(); +// auto const& wc = c.strides(); + +// ttm(m, p, +// c.data(), nc.data(), wc.data(), +// a.data(), na.data(), wa.data(), +// bbdata , nb.data(), wb.data()); + +// return c; +//} + + + +//using value_type = typename tensor_type::value_type; +//using container_type = typename tensor_type::container_type; +//using return_extents_type = std::decay_t; +//using return_container_type = rebind_storage_size_t; +//using return_tensor_type = tensor_core>; + +//auto c = return_tensor_type(value_type{}); + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp new file mode 100644 index 000000000..aed107711 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_tensor.hpp @@ -0,0 +1,337 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TTT_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TTT_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../tags.hpp" +#include "../tensor.hpp" +#include "../type_traits.hpp" + + +namespace boost::numeric::ublas +{ + +template +struct tensor_engine; + +template +class tensor_core; + +template +class matrix; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +namespace detail +{ +/** Enables prod(ttt) if E1 or E1 is dynamic extents with static rank + * + * extents< > & extents + * extents & extents< > + * extents< > & extents< > + * + */ +template< + class TEA, + class TEB, + class EA = typename tensor_core< TEA >::extents_type, + class EB = typename tensor_core< TEB >::extents_type + > +using enable_ttt_if_one_extents_has_dynamic_rank = std::enable_if_t< + ( is_dynamic_rank_v || is_dynamic_rank_v) && + (!is_static_v || !is_static_v ) , bool >; +} // namespace detail +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phia[x]] = nb[phib[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phia one-based permutation tuple of length q for the first input tensor a can be of type std::vector or std::array + * @param[in] phib one-based permutation tuple of length q for the second input tensor b can be of type std::vector or std::array + * @result tensor with order r+s + */ +template = true > +inline decltype(auto) prod(tensor_core< TEA > const &a, + tensor_core< TEB > const &b, + std::vector const &phia, + std::vector const &phib) +{ + using tensorA_type = tensor_core< TEA >; + using tensorB_type = tensor_core< TEB >; + using extentsA_type = typename 
tensorA_type::extents_type; + using extentsB_type = typename tensorB_type::extents_type; + using layoutA_type = typename tensorA_type::layout_type; + using container_type = typename tensorA_type::container_type; + using resizableA_tag = typename tensorA_type::resizable_tag; + using resizableB_tag = typename tensorB_type::resizable_tag; + using valueA_type = typename tensorA_type::value_type; + using valueB_type = typename tensorB_type::value_type; + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + + static_assert(is_dynamic_rank_v || is_dynamic_rank_v); + + + auto const pa = a.rank(); + auto const pb = b.rank(); + + auto const q = std::size_t{phia.size()}; + + if (pa == 0ul) throw std::runtime_error("error in ublas::prod(ttt): order of left-hand side tensor must be greater than 0."); + if (pb == 0ul) throw std::runtime_error("error in ublas::prod(ttt): order of right-hand side tensor must be greater than 0."); + if (pa < q) throw std::runtime_error("error in ublas::prod(ttt): number of contraction dimensions cannot be greater than the order of the left-hand side tensor."); + if (pb < q) throw std::runtime_error("error in ublas::prod(ttt): number of contraction dimensions cannot be greater than the order of the right-hand side tensor."); + if (q != phib.size()) throw std::runtime_error("error in ublas::prod(ttt): permutation tuples must have the same length."); + if (pa < phia.size()) throw std::runtime_error("error in ublas::prod(ttt): permutation tuple for the left-hand side tensor cannot be greater than the corresponding order."); + if (pb < phib.size()) throw std::runtime_error("error in ublas::prod(ttt): permutation tuple for the right-hand side tensor cannot be greater than the corresponding order."); + + auto const &na = a.extents(); + auto const &nb = b.extents(); + + for (auto i = 0ul; i < q; ++i) + if (na.at(phia.at(i) - 1) != nb.at(phib.at(i) - 1)) + throw std::runtime_error("error in ublas::prod: 
permutations of the extents are not correct."); + + auto const r = pa - q; + auto const s = pb - q; + + auto phia1 = std::vector(pa); + auto phib1 = std::vector(pb); + std::iota(phia1.begin(), phia1.end(), std::size_t(1)); + std::iota(phib1.begin(), phib1.end(), std::size_t(1)); + + using dynamic_extents = std::conditional_t, extentsA_type, extentsB_type>; + using extents_base = typename dynamic_extents::base_type; + auto const size = std::size_t(pa+pb-2*q); + auto nc_base = extents_base (std::max(size,std::size_t{2}),std::size_t{1}); + + //for (auto i = 0ul; i < phia.size(); ++i) + for (auto p : phia) + *std::remove(phia1.begin(), phia1.end(), p) = p; + //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ; + + for (auto i = 0ul; i < r; ++i) + nc_base[i] = na[phia1[i] - 1]; + + //for (auto i = 0ul; i < phib.size(); ++i) + for (auto p : phib) + *std::remove(phib1.begin(), phib1.end(), p) = p; + //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ; + + for (auto i = 0ul; i < s; ++i) + nc_base[r + i] = nb[phib1[i] - 1]; + + assert(phia1.size() == pa); + assert(phib1.size() == pb); + + auto nc = dynamic_extents(nc_base); + + using return_tensor_type = tensor_core>; + auto c = return_tensor_type( nc, valueA_type{} ); + + ttt(pa, pb, q, + phia1.data(), phib1.data(), + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + b.data(), b.extents().data(), b.strides().data()); + + return c; +} + + + +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phi[x]] = nb[phi[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phi one-based permutation tuple of length q for both input + * tensors can be of type std::vector or std::array + * @result 
tensor with order r+s + */ +template = true > +inline decltype(auto) prod(tensor_core const &a, + tensor_core const &b, + std::vector const &phi) +{ + return prod(a, b, phi, phi); +} + + + +namespace detail +{ + +/** Enables if extents E1, E1 are dynamic extents with static rank + * + * extents & extents + * + */ +template< + class TE1, + class TE2, + class E1 = typename tensor_core< TE1 >::extents_type, + class E2 = typename tensor_core< TE2 >::extents_type + > +using enable_ttt_if_extents_have_static_rank = std::enable_if_t< + (is_static_rank_v && is_dynamic_v) && + (is_static_rank_v && is_dynamic_v) , bool>; + +} // namespace detail + +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phia[x]] = nb[phib[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phia one-based permutation tuple of length q for the first input tensor a + * @param[in] phib one-based permutation tuple of length q for the second input tensor b + * @result tensor with order r+s + */ +template = true > +inline auto prod(tensor_core const &a, + tensor_core const &b, + std::array const &phia, + std::array const &phib) +{ + using tensorA_type = tensor_core; + using tensorB_type = tensor_core; + using extentsA_type = typename tensorA_type::extents_type; + using extentsB_type = typename tensorB_type::extents_type; + using valueA_type = typename tensorA_type::value_type; + using valueB_type = typename tensorB_type::value_type; + using layout_type = typename tensorA_type::layout_type; + using container_type = typename tensorA_type::container_type; + using resizeableA_tag = typename tensorA_type::resizable_tag; + using resizeableB_tag = typename tensorB_type::resizable_tag; + + + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); 
+ + constexpr auto q = Q; + constexpr auto pa = std::tuple_size_v; + constexpr auto pb = std::tuple_size_v; + + static_assert(pa != 0); + static_assert(pb != 0); + static_assert(pa >= q); + static_assert(pb >= q); + +// if (pa < phia.size()) throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order."); +// if (pb < phib.size()) throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order."); + + auto const &na = a.extents(); + auto const &nb = b.extents(); + + for (auto i = 0ul; i < q; ++i) + if (na.at(phia.at(i) - 1) != nb.at(phib.at(i) - 1)) + throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct."); + + constexpr auto r = pa - q; + constexpr auto s = pb - q; + + auto phia1 = std::array{}; + auto phib1 = std::array{}; + std::iota(phia1.begin(), phia1.end(),std::size_t(1)); + std::iota(phib1.begin(), phib1.end(),std::size_t(1)); + + constexpr auto const msz = std::max(std::size_t(r+s), std::size_t(2)); + using return_extents_type = extents; + auto nc_base = std::array{}; + + for (auto i = 0ul; i < phia.size(); ++i) + *std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i); + //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ; + + for (auto i = 0ul; i < phib.size(); ++i) + *std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i); + //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ; + + for (auto i = 0ul; i < r; ++i) + nc_base[i] = na[phia1[i] - 1]; + + for (auto i = 0ul; i < s; ++i) + nc_base[r+i] = nb[phib1[i] - 1]; + + auto nc = return_extents_type(nc_base); + + using return_tensor_type = tensor_core>; + + auto c = return_tensor_type( nc ); + + ttt(pa, pb, q, + phia1.data(), phib1.data(), + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), 
a.strides().data(), + b.data(), b.extents().data(), b.strides().data()); + + return c; +} + + +/** @brief Computes the q-mode tensor-times-tensor product + * + * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) + * + * @note calls ublas::ttt + * + * na[phi[x]] = nb[phi[x]] for 1 <= x <= q + * + * @param[in] a left-hand side tensor with order r+q + * @param[in] b right-hand side tensor with order s+q + * @param[in] phi one-based permutation tuple of length q for both input + * tensors can be of type std::vector or std::array + * @result tensor with order r+s + */ +template * = nullptr > +inline decltype(auto) prod(tensor_core const &a, + tensor_core const &b, + std::array const &phi) +{ + return prod(a, b, phi, phi); +} + + + + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp new file mode 100644 index 000000000..82c9b3c41 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp @@ -0,0 +1,240 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TTV_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TTV_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" + +namespace boost::numeric::ublas +{ + + +template +struct tensor_engine; + +template +class tensor_core; + +//template +//class vector; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +namespace detail { + +/** Enables if extent E is dynamic with dynamic rank: extents< > */ +template< + class TE, + class E = typename tensor_core::extents_type + > +using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t, bool>; + +} // namespace detail + + +/** @brief Computes the m-mode tensor-times-vector product + * + * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] + * + * @note calls ublas::ttv + * + * @param[in] m contraction dimension with 1 <= m <= p + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p-1, the same storage format and allocator type as A + */ +template ::value, + detail::enable_ttv_if_extent_has_dynamic_rank = true > +inline decltype(auto) prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) +{ + + using tensor = tensor_core< TE >; + using shape = typename tensor::extents_type; + using value = typename tensor::value_type; + using layout = typename tensor::layout_type; + using resize_tag = typename tensor::resizable_tag; + + auto const p = a.rank(); + + static_assert(std::is_same_v); + static_assert(is_dynamic_v); + + if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero."); + if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode."); + if 
(a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); + if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); + + auto const& na = a.extents(); + auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)}; + auto wb = ublas::to_strides(nb,layout{} ); + + auto const sz = std::max( std::size_t(ublas::size(na)-1u), std::size_t(2) ); + auto nc_base = typename shape::base_type(sz,1); + + for (auto i = 0ul, j = 0ul; i < p; ++i) + if (i != m - 1) + nc_base[j++] = na.at(i); + + auto nc = shape(nc_base); + + + auto c = tensor( nc, value{} ); + auto const* bb = &(b(0)); + ttv(m, p, + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + bb, nb.data(), wb.data()); + return c; +} + + +namespace detail { +/** Enables if extent E is dynamic with static rank: extents */ +template< + class TE, + class E = typename tensor_core< TE >::extents_type + > +using enable_ttv_if_extent_is_dynamic_with_static_rank = + std::enable_if_t< is_static_rank_v< E > && is_dynamic_v< E >, bool>; + +} // namespace detail + + +/** @brief Computes the m-mode tensor-times-vector product + * + * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] + * + * @note calls ublas::ttv + * + * @param[in] m contraction dimension with 1 <= m <= p + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p-1, the same storage format and allocator type as A + */ +template ::value, + detail::enable_ttv_if_extent_is_dynamic_with_static_rank = true + > +inline auto prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) +{ + using tensor = tensor_core< TE >; + using shape = typename tensor::extents_type; + using container = typename tensor::container_type; + using layout = typename tensor::layout_type; + using resizeable_tag = 
typename tensor::resizable_tag; + + constexpr auto p = std::tuple_size_v; + constexpr auto sz = std::max(std::size_t(std::tuple_size_v-1U),std::size_t(2)); + + using shape_b = ublas::extents<2>; + using shape_c = ublas::extents; + using tensor_c = tensor_core>; + + static_assert(std::is_same_v); + + if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero."); + if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the modus."); + if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); + if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); + + auto const& na = a.extents(); + + auto nc_base = typename shape_c::base_type{}; + std::fill(nc_base.begin(), nc_base.end(),std::size_t(1)); + for (auto i = 0ul, j = 0ul; i < p; ++i) + if (i != m - 1) + nc_base[j++] = na.at(i); + + auto nc = shape_c(std::move(nc_base)); + auto nb = shape_b{b.size(),1UL}; + auto wb = ublas::to_strides(nb,layout{}); + auto c = tensor_c( std::move(nc) ); + auto const* bb = &(b(0)); + + ttv(m, p, + c.data(), c.extents().data(), c.strides().data(), + a.data(), a.extents().data(), a.strides().data(), + bb, nb.data(), wb.data() ); + return c; +} + + + +/** @brief Computes the m-mode tensor-times-vector product + * + * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] + * + * @note calls ublas::ttv + * + * @tparam M contraction dimension with 1 <= m <= p + * @param[in] a tensor object A with order p + * @param[in] b vector object B + * + * @returns tensor object C with order p-1, the same storage format and allocator type as A + */ + +template ::value> +inline auto prod( tensor_core< TE > const &a, vector const &b) +{ + using tensor = tensor_core< TE >; + using container = typename tensor::container; + 
using shape = typename tensor::extents; + using layout = typename tensor::layout; + using shape_b = extents<2>; + using shape_c = remove_element_t; + using container_c = rebind_storage_size_t; + using tensor_c = tensor_core>; + + static_assert( m != 0ul ); + static_assert(std::tuple_size_v != 0 ); + static_assert(std::tuple_size_v >= m ); + + constexpr auto p = std::tuple_size_v; + + if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); + if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); + + auto const& na = a.extents(); + + auto nc = shape_c{}; + auto nb = shape_b{std::size_t(b.size()),std::size_t(1)}; + + auto c = tensor_c{}; + auto const* bb = &(b(0)); + + auto const& wa = a.strides(); + auto const& wc = c.strides(); + auto wb = ublas::to_strides(nb,layout{}); + + ttv(m, p, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + bb, nb.data(), wb.data()); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_NORM_HPP diff --git a/include/boost/numeric/ublas/tensor/function/trans.hpp b/include/boost/numeric/ublas/tensor/function/trans.hpp new file mode 100644 index 000000000..e328b4327 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/function/trans.hpp @@ -0,0 +1,78 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_TRANS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_TRANS_HPP + +#include +#include +#include + +#include "../extents.hpp" +#include "../traits/basic_type_traits.hpp" +#include "../multiplication.hpp" + + +namespace boost::numeric::ublas +{ +template +class tensor_core; +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas +{ + +/** @brief Transposes a tensor according to a permutation tuple + * + * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip] + * + * @note calls trans function + * + * @param[in] a tensor object of rank p + * @param[in] tau one-based permutation tuple of length p + * @returns a transposed tensor object with the same storage format F and allocator type A + */ +template > +inline decltype(auto) trans(tensor_core< TensorEngine > const &a, PermuType const &tau) +{ + + using tensor_type = tensor_core< TensorEngine >; + using extents_type = typename tensor_type::extents_type; + + static_assert( is_dynamic_v< extents_type > ); + + auto const p = a.rank(); + auto const &na = a.extents(); + typename extents_type::base_type nc; + + if constexpr( is_dynamic_rank_v ){ + nc.resize(p); + } + + for (auto i = 0u; i < p; ++i){ + nc.at(tau.at(i) - 1) = na.at(i); + } + + auto c = tensor_type( extents_type( std::move(nc) ) ); + + if (a.empty()){ + return c; + } + + trans(a.rank(), a.extents().data(), tau.data(), + c.data(), c.strides().data(), + a.data(), a.strides().data()); + + return c; +} + +} // namespace boost::numeric::ublas + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_PROD_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/functions.hpp b/include/boost/numeric/ublas/tensor/functions.hpp index d77b6a9a4..8029d59a1 100644 --- a/include/boost/numeric/ublas/tensor/functions.hpp +++ b/include/boost/numeric/ublas/tensor/functions.hpp @@ -1,1101 +1,27 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, 
cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#ifndef _BOOST_UBLAS_TENSOR_FUNCTIONS_HPP_ -#define _BOOST_UBLAS_TENSOR_FUNCTIONS_HPP_ - -#include -#include - -namespace boost::numeric::ublas{ - - template class tensor_core; - -} // namespace boost::numeric::ublas - - -namespace boost::numeric::ublas -{ - namespace detail{ - - template - struct is_complex : std::false_type{}; - - template - struct is_complex< std::complex > : std::true_type{}; - - template - inline static constexpr bool is_complex_v = is_complex::value; - - /// To check if the type is the std::array or not. - /// Can be extented by providing specialization. - /// Point to Remember: C-Style arrays are not supported. - template - struct is_bounded_array : std::false_type{}; - - template - inline static constexpr bool is_bounded_array_v = is_bounded_array::value; - - template - struct is_bounded_array> : std::true_type{}; - - /// Gives the extent of rank one std::array. - /// Similar to is_bounded_array, it can also be - /// extented using specialization. - /// Custom Type should have similar APIs to - /// std::array. - /// Point to Remember: C-Style arrays are not supported. 
- template - struct extent_of_rank_one_array; - - template - struct extent_of_rank_one_array> - : std::integral_constant - {}; - - template - inline static constexpr bool extent_of_rank_one_array_v = extent_of_rank_one_array::value; - - } // namespace detail - - /** @brief Computes the m-mode tensor-times-vector product - * - * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] - * - * @note calls ublas::ttv - * - * @param[in] m contraction dimension with 1 <= m <= p - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * - * @returns tensor object C with order p-1, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod( tensor_core< TensorEngine > const &a, - vector::value_type, A> const &b, - const std::size_t m) - { - using tensor_type = tensor_core< TensorEngine >; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using array_type = typename tensor_type::array_type; - using size_type = typename extents_type::size_type; - using layout_type = typename tensor_type::layout_type; - - auto const p = a.rank(); - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::prod(tensor_core const&, vector const& ): " - "tensor container should be resizable" - ); - - static_assert( - is_dynamic_v, - "error in boost::numeric::ublas::prod(tensor_core const&, vector const& ): " - "extents type should be dynamic" - ); - - if (m == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): " - "contraction mode must be greater than zero."); - - if (p < m) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): rank of tensor must be " - "greater than or equal to the modus."); - - if (a.empty()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): first " - "argument tensor should not be 
empty."); - - if (b.empty()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): second " - "argument vector should not be empty."); - - using extents_value_type = typename extents_type::value_type; - - auto a_extents = a.extents(); - - auto extents_result = [&a_extents](){ - using size_type = typename extents_type::size_type; - if constexpr( is_static_rank_v ){ - // To disable the warning for unused variable; - (void)a_extents; - constexpr size_type esz = extents_type::_size - 1u; - constexpr auto sz = std::max( esz, size_type(2) ); - auto ret = extents< sz >(); - ret.fill(1u); - return ret; - }else{ - using extents_base_type = typename extents_type::base_type; - auto const sz = std::max( a_extents.size() - 1, size_type(2) ); - auto arr = extents_base_type(sz,1); - return extents_type{ std::move(arr) } ; - } - }; - - auto nc = extents_result(); - auto nb = std::vector{b.size(), extents_value_type(1)}; - - for (auto i = size_type(0), j = size_type(0); i < p; ++i) - if (i != m - 1) - nc[j++] = a_extents.at(i); - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core( nc, value_type{} ); - auto bb = &(b(0)); - ttv(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), nb.data()); - return c; - } - - /** @brief Computes the m-mode tensor-times-matrix product - * - * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] - * - * @note calls ublas::ttm - * - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * @param[in] m contraction dimension with 1 <= m <= p - * - * @returns tensor object C with order p, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod( tensor_core< TensorEngine > const &a, - matrix::value_type, typename tensor_core< TensorEngine >::layout_type , A> const 
&b, - const std::size_t m) - { - - using tensor_type = tensor_core< TensorEngine >; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - using array_type = typename tensor_type::array_type; - using dynamic_strides_type = basic_strides; - - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::prod(tensor_core const&, matrix const& ): " - "tensor container should be resizable" - ); - - static_assert( - is_dynamic_v, - "error in boost::numeric::ublas::prod(tensor_core const&, matrix const& ): " - "extents type should be dynamic" - ); - - auto const p = a.rank(); - - if (m == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): " - "contraction mode must be greater than zero."); - - if (p < m || m > a.extents().size()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): rank " - "of the tensor must be greater equal the modus."); - - if (a.empty()) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): first " - "argument tensor should not be empty."); - - if (b.size1() * b.size2() == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): second " - "argument matrix should not be empty."); - - auto nc = a.extents(); - auto nb = extents<>{b.size1(), b.size2()}; - - auto wb = dynamic_strides_type(nb); - - nc[m - 1] = nb[0]; - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core(nc, value_type{}); - - auto bb = &(b(0, 0)); - ttm(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), wb.data()); - - return c; - } - - /** @brief Computes the q-mode tensor-times-tensor product - * - * 
Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) - * - * @note calls ublas::ttt - * - * na[phia[x]] = nb[phib[x]] for 1 <= x <= q - * - * @param[in] a left-hand side tensor with order r+q - * @param[in] b right-hand side tensor with order s+q - * @param[in] phia one-based permutation tuple of length q for the first - * input tensor a can be of type std::vector or std::array - * @param[in] phib one-based permutation tuple of length q for the second - * input tensor b can be of type std::vector or std::array - * @result tensor with order r+s - */ - template , - std::enable_if_t< - !( is_static_v::extents_type> || - is_static_v::extents_type> ) - ,int> = 0 - > - inline decltype(auto) prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b, - PermuType const &phia, PermuType const &phib) - { - using tensor_type = tensor_core< TensorEngine1 >; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - using extents_size_type = typename extents_type::size_type; - using array_type = typename tensor_type::array_type; - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - typename tensor_core::resizable_tag - > && - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::prod(tensor_core const&, tensor_core const&, " - "PermuType const&, PermuType const& ): " - "Both the tensor storage should have the same type of storage and both should be resizable" - ); - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&, " - "PermuType const&, PermuType const&): " - "Both the tensor should have the same value_type" - ); - - auto const pa = a.rank(); - auto const pb = b.rank(); - - auto const q = static_cast(phia.size()); 
- - if (pa == 0ul) - throw std::runtime_error("error in ublas::prod: order of left-hand side tensor must be greater than 0."); - if (pb == 0ul) - throw std::runtime_error("error in ublas::prod: order of right-hand side tensor must be greater than 0."); - if (pa < q) - throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the left-hand side tensor."); - if (pb < q) - throw std::runtime_error("error in ublas::prod: number of contraction dimensions cannot be greater than the order of the right-hand side tensor."); - - if (q != phib.size()) - throw std::runtime_error("error in ublas::prod: permutation tuples must have the same length."); - - if (pa < phia.size()) - throw std::runtime_error("error in ublas::prod: permutation tuple for the left-hand side tensor cannot be greater than the corresponding order."); - if (pb < phib.size()) - throw std::runtime_error("error in ublas::prod: permutation tuple for the right-hand side tensor cannot be greater than the corresponding order."); - - auto const &na = a.extents(); - auto const &nb = b.extents(); - - for (auto i = 0ul; i < q; ++i) - if (na.at(phia.at(i) - 1) != nb.at(phib.at(i) - 1)) - throw std::runtime_error("error in ublas::prod: permutations of the extents are not correct."); - - std::size_t const r = pa - q; - std::size_t const s = pb - q; - - std::vector phia1(pa); - std::vector phib1(pb); - std::iota(phia1.begin(), phia1.end(), 1ul); - std::iota(phib1.begin(), phib1.end(), 1ul); - - auto extents_result = [&e1 = na, &e2 = nb, &a1 = phia, &a2 = phib](){ - using lextents_type = std::decay_t< decltype(e1) >; - using rextents_type = std::decay_t< decltype(e2) >; - using array_type = std::decay_t< decltype(a1) >; - if constexpr( - detail::is_bounded_array_v && - is_static_rank_v && - is_static_rank_v - ){ - constexpr auto const N = detail::extent_of_rank_one_array_v; - constexpr auto const sz = lextents_type::_size + rextents_type::_size - 2 * N; - auto res 
= extents(); - res.fill(1u); - return res; - }else{ - extents_size_type const size = ( e1.size() + e2.size() ) - ( a1.size() + a2.size() ); - using extents_base_type = typename extents<>::base_type; - auto arr = extents_base_type( std::max(size, extents_size_type(2)), 1u ); - return extents<>(std::move(arr)); - } - }; - - auto nc = extents_result(); - - for (auto i = 0ul; i < phia.size(); ++i) - *std::remove(phia1.begin(), phia1.end(), phia.at(i)) = phia.at(i); - - //phia1.erase( std::remove(phia1.begin(), phia1.end(), phia.at(i)), phia1.end() ) ; - - for (auto i = 0ul; i < r; ++i) - nc[i] = na[phia1[i] - 1]; - - for (auto i = 0ul; i < phib.size(); ++i) - *std::remove(phib1.begin(), phib1.end(), phib.at(i)) = phib.at(i); - //phib1.erase( std::remove(phib1.begin(), phib1.end(), phia.at(i)), phib1.end() ) ; - - for (auto i = 0ul; i < s; ++i) - nc[r + i] = nb[phib1[i] - 1]; - - // std::copy( phib.begin(), phib.end(), phib1.end() ); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(phia1.size() == pa); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(phib1.size() == pb); - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - array_type - >; - - auto c = tensor_core( nc, value_type{} ); - - ttt(pa, pb, q, - phia1.data(), phib1.data(), - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - b.data(), b.extents().data(), b.strides().data()); - - return c; - } - - // template - // auto operator*( tensor_index const& lhs, tensor_index - // const& rhs) - - /** @brief Computes the q-mode tensor-times-tensor product - * - * Implements C[i1,...,ir,j1,...,js] = sum( A[i1,...,ir+q] * B[j1,...,js+q] ) - * - * @note calls ublas::ttt - * - * na[phi[x]] = nb[phi[x]] for 1 <= x <= q - * - * @param[in] a left-hand side tensor with order r+q - * @param[in] b right-hand side tensor with order 
s+q - * @param[in] phi one-based permutation tuple of length q for bot input - * tensors can be of type std::vector or std::array - * @result tensor with order r+s - */ - template > - inline decltype(auto) prod( - tensor_core< TensorEngine1 > const &a, - tensor_core< TensorEngine2 > const &b, - PermuType const &phi) - { - return prod(a, b, phi, phi); - } - - /** @brief Computes the inner product of two tensors * - * Implements c = sum(A[i1,i2,...,ip] * B[i1,i2,...,jp]) - * - * @note calls inner function - * - * @param[in] a tensor object A - * @param[in] b tensor object B - * - * @returns a value type. - */ - template - inline decltype(auto) inner_prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b) - { - - - using value_type = typename tensor_core< TensorEngine1 >::value_type; - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::inner_prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&): " - "Both the tensor should have the same value_type" - ); - - if (a.rank() != b.rank()) - throw std::length_error("error in boost::numeric::ublas::inner_prod: Rank of both the tensors must be the same."); - - if (a.empty() || b.empty()) - throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensors should not be empty."); - - if (a.extents() != b.extents()) - throw std::length_error("error in boost::numeric::ublas::inner_prod: Tensor extents should be the same."); - - return inner(a.rank(), a.extents().data(), - a.data(), a.strides().data(), - b.data(), b.strides().data(), value_type{0}); - } - - /** @brief Computes the outer product of two tensors - * - * Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] - * - * @note calls outer function - * - * @param[in] a tensor object A - * @param[in] b tensor object B - * - * @returns tensor object C with the same storage format F and allocator type A1 - */ - template ::extents_type> || - is_static_v::extents_type> ) 
- ,int> = 0 - > - inline decltype(auto) outer_prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b) - { - using tensor_type = tensor_core< TensorEngine1 >; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - using array_type = typename tensor_type::array_type; - - static_assert( - std::is_same_v< - typename tensor_core::resizable_tag, - typename tensor_core::resizable_tag - > && - std::is_same_v< - typename tensor_core::resizable_tag, - storage_resizable_container_tag - >, - "error in boost::numeric::ublas::outer_prod(tensor_core const&, tensor_core const&): " - "Both the tensor storage should have the same type of storage and both should be resizable" - ); - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::outer_prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&): " - "Both the tensor should have the same value_type" - ); - - if (a.empty() || b.empty()) - throw std::runtime_error( - "error in boost::numeric::ublas::outer_prod: " - "tensors should not be empty."); - - auto extents_result = [&e1 = a.extents(), &e2 = b.extents()](){ - using lextents_type = std::decay_t< decltype(e1) >; - using rextents_type = std::decay_t< decltype(e2) >; - - if constexpr( is_static_rank_v && is_static_rank_v ){ - return extents< lextents_type::_size + rextents_type::_size >{}; - }else { - using extents_base_type = typename extents<>::base_type; - auto arr = extents_base_type( e1.size() + e2.size(), 1 ); - return extents<>{std::move(arr)}; - } - }; - - auto nc = extents_result(); - - auto a_extents = a.extents(); - auto b_extents = b.extents(); - - for(auto i = 0u; i < a.rank(); ++i) - nc.at(i) = a_extents.at(i); - - for(auto i = 0u; i < b.rank(); ++i) - nc.at(a.rank()+i) = b_extents.at(i); - - using c_extents_type = std::decay_t< decltype(nc) >; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - 
array_type - >; - - auto c = tensor_core( nc, value_type{} ); - - outer(c.data(), c.rank(), c.extents().data(), c.strides().data(), - a.data(), a.rank(), a_extents.data(), a.strides().data(), - b.data(), b.rank(), b_extents.data(), b.strides().data()); - - return c; - } - - /** @brief Transposes a tensor according to a permutation tuple - * - * Implements C[tau[i1],tau[i2]...,tau[ip]] = A[i1,i2,...,ip] - * - * @note calls trans function - * - * @param[in] a tensor object of rank p - * @param[in] tau one-based permutation tuple of length p - * @returns a transposed tensor object with the same storage format F and allocator type A - */ - template > - inline decltype(auto) trans(tensor_core< TensorEngine > const &a, PermuType const &tau) - { - - using tensor_type = tensor_core< TensorEngine >; - using layout_type = typename tensor_type::layout_type; - using array_type = typename tensor_type::array_type; - using extents_type = typename tensor_type::extents_type; - - static_assert( - is_dynamic_v< extents_type > , - "error in boost::numeric::ublas::trans(tensor_core< TensorEngine > const &a, " - "PermuType const &tau): " - "Tensor should have dynamic extents" - ); - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - array_type - >; - - auto const p = a.rank(); - auto const &na = a.extents(); - typename extents_type::base_type nc; - - if constexpr( is_dynamic_rank_v ){ - nc.resize(p); - } - - for (auto i = 0u; i < p; ++i) - nc.at(tau.at(i) - 1) = na.at(i); - - auto c = tensor_core( extents_type( std::move(nc) ) ); - - if (a.empty()) - return c; - - trans(a.rank(), a.extents().data(), tau.data(), - c.data(), c.strides().data(), - a.data(), a.strides().data()); - - return c; - } - /** - * - * @brief Computes the frobenius nor of a tensor - * - * @note Calls accumulate on the tensor. 
- * - * implements - * k = sqrt( sum_(i1,...,ip) A(i1,...,ip)^2 ) - * - * @tparam V the data type of tensor - * @tparam F the format of tensor storage - * @tparam A the array_type of tensor - * @param a the tensor whose norm is expected of rank p. - * @return the frobenius norm of a tensor. - */ - template - inline decltype(auto) norm(tensor_core< TensorEngine > const &a) - { - using tensor_type = tensor_core< TensorEngine >; - using value_type = typename tensor_type::value_type; - - static_assert(std::is_default_constructible::value, - "Value type of tensor must be default construct able in order " - "to call boost::numeric::ublas::norm"); - - if (a.empty()) - { - throw std::runtime_error( - "error in boost::numeric::ublas::norm: tensors should not be empty."); - } - return std::sqrt(accumulate(a.order(), a.extents().data(), a.data(), a.strides().data(), value_type{}, - [](auto const &l, auto const &r) { return l + r * r; })); - } - - - /** @brief Computes the complex conjugate component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template::value_type>, int > = 0 - > - auto conj(detail::tensor_expression< tensor_core, D > const& expr) - { - return detail::make_unary_tensor_expression< tensor_core > (expr(), [] (auto const& l) { return std::conj( l ); } ); - } - - /** @brief Computes the complex conjugate component of tensor elements within a tensor expression - * - * @param[in] expr tensor expression - * @returns complex tensor - */ - template - auto conj(detail::tensor_expression const& expr) - { - using old_tensor_type = T; - using value_type = typename old_tensor_type::value_type; - using layout_type = typename old_tensor_type::layout_type; - using array_type = typename old_tensor_type::array_type; - using extents_type = typename old_tensor_type::extents_type; - - using complex_type = std::complex; - using storage_traits_t = storage_traits; - - using t_engine = tensor_engine< 
- extents_type, - layout_type, - strides, - typename storage_traits_t::template rebind - >; - - using tensor_type = tensor_core; - - if( detail::retrieve_extents( expr ).empty() ) - throw std::runtime_error("error in boost::numeric::ublas::conj: tensors should not be empty."); - - auto a = old_tensor_type( expr ); - auto c = tensor_type( a.extents() ); - - std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::conj(l) ; } ); - - return c; - } - - /** @brief Extract the real component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template - auto real(detail::tensor_expression const& expr) { - return detail::make_unary_tensor_expression (expr(), [] (auto const& l) { return std::real( l ); } ); - } - - /** @brief Extract the real component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template::value_type>, int > = 0 - > - auto real(detail::tensor_expression< tensor_core< TensorEngine > ,D > const& expr) - { - - using old_tensor_type = tensor_core< TensorEngine >; - using complex_type = typename old_tensor_type::value_type; - using value_type = typename complex_type::value_type; - using layout_type = typename old_tensor_type::layout_type; - using array_type = typename old_tensor_type::array_type; - using extents_type = typename old_tensor_type::extents_type; - using storage_traits_t = storage_traits; - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - typename storage_traits_t::template rebind - >; - - using tensor_type = tensor_core; - - if( detail::retrieve_extents( expr ).empty() ) - throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); - - auto a = old_tensor_type( expr ); - auto c = tensor_type( a.extents() ); - - std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::real(l) ; } ); 
- - return c; - } - - - /** @brief Extract the imaginary component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template - auto imag(detail::tensor_expression const& lhs) { - return detail::make_unary_tensor_expression (lhs(), [] (auto const& l) { return std::imag( l ); } ); - } - - - /** @brief Extract the imag component of tensor elements within a tensor expression - * - * @param[in] lhs tensor expression - * @returns unary tensor expression - */ - template::value_type>, int > = 0 - > - auto imag(detail::tensor_expression< tensor_core< TensorEngine > ,D> const& expr) - { - using old_tensor_type = tensor_core< TensorEngine >; - using complex_type = typename old_tensor_type::value_type; - using value_type = typename complex_type::value_type; - using layout_type = typename old_tensor_type::layout_type; - using array_type = typename old_tensor_type::array_type; - using extents_type = typename old_tensor_type::extents_type; - using storage_traits_t = storage_traits; - - using t_engine = tensor_engine< - extents_type, - layout_type, - strides, - typename storage_traits_t::template rebind - >; - - using tensor_type = tensor_core; - - if( detail::retrieve_extents( expr ).empty() ) - throw std::runtime_error("error in boost::numeric::ublas::real: tensors should not be empty."); - - auto a = old_tensor_type( expr ); - auto c = tensor_type( a.extents() ); - - std::transform( a.begin(), a.end(), c.begin(), [](auto const& l){ return std::imag(l) ; } ); - - return c; - } - -} - -// static functions -namespace boost::numeric::ublas -{ - - namespace detail{ - - template - inline - constexpr auto extents_result_tensor_times_vector( - [[maybe_unused]] basic_static_extents e, - [[maybe_unused]] basic_static_extents te1, - [[maybe_unused]] basic_static_extents te2) - { - return basic_static_extents{}; - } - - template - inline - constexpr auto extents_result_tensor_times_vector( - [[maybe_unused]] 
basic_static_extents e1, - [[maybe_unused]] basic_static_extents e2, - [[maybe_unused]] basic_static_extents e3 = basic_static_extents{}) - { - if constexpr(I != M - 1){ - return extents_result_tensor_times_vector - ( basic_static_extents{}, basic_static_extents{}, basic_static_extents{} ); - }else{ - return extents_result_tensor_times_vector - ( basic_static_extents{}, basic_static_extents{}, basic_static_extents{} ); - } - } - - - template - inline - constexpr auto extents_result_tensor_times_vector(basic_static_extents const& e) - { - using size_type = typename basic_static_extents::size_type; - auto ones = typename impl::make_sequence_of_ones_t< T, std::max( size_type(2), sizeof...(E) ) >::extents_type{}; - return extents_result_tensor_times_vector(e, ones); - } - - template - inline - constexpr auto static_extents_set_at( - [[maybe_unused]] basic_static_extents const& e1, - [[maybe_unused]] basic_static_extents e2 = basic_static_extents{} - ){ - static_assert( I < sizeof...(E) + 1, "boost::numeric::ublas::detail::static_extents_set_at(): out of bound"); - if constexpr( sizeof...(E) == 0 ){ - if constexpr( I == 0 ){ - return basic_static_extents{}; - }else{ - return basic_static_extents{}; - } - }else{ - if constexpr(I == 0){ - return basic_static_extents{}; - }else{ - return static_extents_set_at( basic_static_extents{}, basic_static_extents{} ); - } - } - } - - } // namespace detail - - /** @brief Computes the m-mode tensor-times-vector product - * - * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] - * - * @note calls ublas::ttv - * - * @tparam M contraction dimension with 1 <= m <= p - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * - * @returns tensor object C with order p-1, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod(tensor_core< TensorType > const &a - , vector::value_type, A> const &b) - { - using tensor_type = tensor_core< TensorType >; - using array_type 
= typename tensor_type::array_type; - using extents_type = typename tensor_type::extents_type; - using value_type = typename tensor_type::value_type; - using layout_type = typename tensor_type::layout_type; - - auto const p = std::size_t(a.rank()); - - static_assert( M != 0ul, - "error in boost::numeric::ublas::prod(ttv): " - "contraction mode must be greater than zero."); - - static_assert( extents_type::_size >= M, - "error in boost::numeric::ublas::prod(ttv): rank of tensor must be " - "greater than or equal to the modus."); - - static_assert(extents_type::_size != 0, - "error in boost::numeric::ublas::prod(ttv): first " - "argument tensor should not be empty."); - - if (b.size() == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttv): second " - "argument vector should not be empty."); - - using extents_value_type = typename extents_type::value_type; - - auto nc = detail::extents_result_tensor_times_vector(a.extents()); - auto nb = std::vector{b.size(), extents_value_type(1)}; - using c_extents_type = std::decay_t; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - rebind_storage_size_t - >; - - auto c = t_engine(value_type{}); - auto bb = &(b(0)); - - auto& a_static_extents = a.extents().base(); - auto& c_static_extents = c.extents().base(); - - auto& a_static_strides = a.strides().base(); - auto& c_static_strides = c.strides().base(); - - ttv(M, p, - c.data(), c_static_extents.data(), c_static_strides.data(), - a.data(), a_static_extents.data(), a_static_strides.data(), - bb, nb.data(), nb.data()); - - return c; - } - - /** @brief Computes the m-mode tensor-times-matrix product - * - * Implements C[i1,...,im-1,j,im+1,...,ip] = A[i1,i2,...,ip] * B[j,im] - * - * @note calls ublas::ttm - * - * @tparam M contraction dimension with 1 <= M <= p - * @tparam MatrixDimension is a non contracting dimension - * @param[in] a tensor object A with order p - * @param[in] b vector object B - * - * @returns tensor object 
C with order p, the same storage format and allocator type as A - */ - template - inline decltype(auto) prod(tensor_core< TensorType > const &a, - matrix::value_type, typename tensor_core< TensorType >::layout_type, A> const &b) - { - using tensor_type = tensor_core< TensorType >; - using extents_type = typename tensor_type::extents_type; - using layout_type = typename tensor_type::layout_type; - using value_type = typename tensor_type::value_type; - using array_type = typename tensor_type::array_type; - using dynamic_strides_type = strides_t, layout_type>; - - auto const p = a.rank(); - - static_assert(M != 0ul, - "error in boost::numeric::ublas::prod(ttm): " - "contraction mode must be greater than zero."); - - static_assert( extents_type::_size >= M , - "error in boost::numeric::ublas::prod(ttm): rank " - "of the tensor must be greater equal the modus."); - - static_assert( extents_type::_size, - "error in boost::numeric::ublas::prod(ttm): first " - "argument tensor should not be empty."); - - if (b.size1() * b.size2() == 0ul) - throw std::length_error( - "error in boost::numeric::ublas::prod(ttm): second " - "argument matrix should not be empty."); - - auto nc = detail::static_extents_set_at< M - 1, MatrixDimension >( a.extents() ); - auto nb = extents<>{b.size1(), b.size2()}; - - auto wb = dynamic_strides_type(nb); - - using c_extents_type = std::decay_t; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - rebind_storage_size_t - >; - auto c = t_engine(value_type{}); - - auto bb = &(b(0, 0)); - - auto& a_static_extents = a.extents().base(); - auto& c_static_extents = c.extents().base(); - - auto& a_static_strides = a.strides().base(); - auto& c_static_strides = c.strides().base(); - ttm(M, p, - c.data(), c_static_extents.data(), c_static_strides.data(), - a.data(), a_static_extents.data(), a_static_strides.data(), - bb, nb.data(), wb.data()); - - return c; - } - - /** @brief Computes the outer product of two tensors - * - * 
Implements C[i1,...,ip,j1,...,jq] = A[i1,i2,...,ip] * B[j1,j2,...,jq] - * - * @note calls outer function - * - * @param[in] a tensor object A - * @param[in] b tensor object B - * - * @returns tensor object C with the same storage format F and allocator type A1 - */ - template ::extents_type > && - is_static_v< typename tensor_core< TensorEngine2 >::extents_type > - ,int> = 0 - > - inline decltype(auto) outer_prod(tensor_core< TensorEngine1 > const &a, tensor_core< TensorEngine2 > const &b) - { - if (a.empty() || b.empty()) - throw std::runtime_error( - "error in boost::numeric::ublas::outer_prod: " - "tensors should not be empty."); - - using extents_type1 = std::decay_t< decltype(a.extents()) >; - using extents_type2 = std::decay_t< decltype(b.extents()) >; - using array_type = typename tensor_core< TensorEngine1 >::array_type; - using value_type = typename tensor_core< TensorEngine1 >::value_type; - using layout_type = typename tensor_core< TensorEngine1 >::layout_type; - - static_assert( - std::is_same_v::value_type>, - "error in boost::numeric::ublas::outer_prod(tensor_core< TensorEngine1 > const&, tensor_core< TensorEngine2 > const&): " - "Both the tensor should have the same value_type" - ); - - auto nc = detail::impl::concat_t{}; - - auto a_extents = a.extents(); - auto b_extents = b.extents(); - - - using c_extents_type = std::decay_t; - - using t_engine = tensor_engine< - c_extents_type, - layout_type, - strides, - rebind_storage_size_t - >; - - auto c = t_engine(value_type{}); - - auto& a_static_extents = a_extents.base(); - auto& a_static_strides = a.strides().base(); - - auto& b_static_extents = b_extents.base(); - auto& b_static_strides = b.strides().base(); - - auto c_static_extents = c.extents().base(); - auto c_static_strides = c.strides().base(); - - outer(c.data(), c.rank(), c_static_extents.data(), c_static_strides.data(), - a.data(), a.rank(), a_static_extents.data(), a_static_strides.data(), - b.data(), b.rank(), b_static_extents.data(), 
b_static_strides.data()); - - return c; - } -} +#ifndef BOOST_UBLAS_TENSOR_FUNCTIONS_HPP +#define BOOST_UBLAS_TENSOR_FUNCTIONS_HPP + +#include "function/reshape.hpp" +#include "function/inner_prod.hpp" +#include "function/outer_prod.hpp" +#include "function/norm.hpp" +#include "function/imag.hpp" +#include "function/real.hpp" +#include "function/conj.hpp" +#include "function/trans.hpp" +#include "function/tensor_times_vector.hpp" +#include "function/tensor_times_matrix.hpp" +#include "function/tensor_times_tensor.hpp" +#include "function/init.hpp" #endif diff --git a/include/boost/numeric/ublas/tensor/index.hpp b/include/boost/numeric/ublas/tensor/index.hpp index 7de9e52e7..f13872ceb 100644 --- a/include/boost/numeric/ublas/tensor/index.hpp +++ b/include/boost/numeric/ublas/tensor/index.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,14 +13,11 @@ #define BOOST_UBLAS_TENSOR_INDEX_HPP -#include #include +#include #include -namespace boost { -namespace numeric { -namespace ublas { -namespace index { +namespace boost::numeric::ublas::index { /** @brief Proxy template class for the einstein summation notation * @@ -80,10 +77,7 @@ static constexpr index_type<24> _x; static constexpr index_type<25> _y; static constexpr index_type<26> _z; -} // namespace indices +} // namespace boost::numeric::ublas::index -} -} -} #endif // _BOOST_UBLAS_TENSOR_INDEX_HPP_ diff --git a/include/boost/numeric/ublas/tensor/index_functions.hpp b/include/boost/numeric/ublas/tensor/index_functions.hpp new file mode 100644 index 000000000..bc3d54ac0 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/index_functions.hpp @@ -0,0 +1,64 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_INDEX_FUNCTIONS_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_INDEX_FUNCTIONS_HPP + +#include +#include +#include +#include "concepts.hpp" + + +namespace boost::numeric::ublas::detail +{ + +/** @brief Returns relative memory index with respect to a multi-index + * + * @code auto j = to_index({3,4,5}, to_strides({4,2,3},first_order{})); @endcode + * + * @param[in] i multi-index of length p + * @param[in] w stride vector of length p + * @returns relative memory location depending on \c i and \c w + */ +template +[[nodiscard]] inline constexpr auto to_index(std::vector const& w, std::vector const& i) +{ + return std::inner_product(i.begin(), i.end(), w.begin(), T{}); +} + +template +[[nodiscard]] inline constexpr auto to_index(std::array const& w, std::array const& i) +{ + return std::inner_product(i.begin(), i.end(), w.begin(), T{}); +} + +template +[[nodiscard]] inline constexpr auto to_index(std::array const& w, Is ... is) +{ + static_assert(N != sizeof...(is)+2); + auto ai = std::array{I(is)...}; + return std::inner_product(ai.begin(), ai.end(), w.begin(), I{}); +} + +template +[[nodiscard]] inline auto to_index(std::vector const& w, Is ... 
is) +{ + constexpr auto N = sizeof...(is); + auto ai = std::array{I(is)...}; + return std::inner_product(ai.begin(), ai.end(), w.begin(), std::size_t{}); +} + + +} // namespace boost::numeric::ublas::detail + +#endif // BOOST_NUMERIC_UBLAS_TENSOR_INDEX_FUNCTIONS_HPP diff --git a/include/boost/numeric/ublas/tensor/multi_index.hpp b/include/boost/numeric/ublas/tensor/multi_index.hpp index 5b4638944..adb5f8707 100644 --- a/include/boost/numeric/ublas/tensor/multi_index.hpp +++ b/include/boost/numeric/ublas/tensor/multi_index.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,30 +13,16 @@ #define BOOST_UBLAS_TENSOR_MULTI_INDEX_HPP -#include + #include +#include #include -#include "multi_index_utility.hpp" #include "index.hpp" - -namespace boost { -namespace numeric { -namespace ublas { -namespace index { - -template -struct index_type; - -} // namespace indices -} -} -} +#include "multi_index_utility.hpp" -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas { /** @brief Proxy class for the einstein summation notation * @@ -49,7 +35,8 @@ class multi_index multi_index() = delete; template - constexpr multi_index(index::index_type const& i, indexes ... is ) + constexpr explicit inline + multi_index(index::index_type const& i, indexes ... is ) : _base{i(), is()... } { static_assert( sizeof...(is)+1 == N, @@ -59,30 +46,34 @@ class multi_index "Static assert in boost::numeric::ublas::multi_index: indexes occur twice in multi-index." 
); } - multi_index(multi_index const& other) - : _base(other._base) + multi_index(multi_index const& other) = default; + multi_index(multi_index&& other) noexcept = default ; + + multi_index& operator=(multi_index other) { + std::swap(this->_base,other._base); + return *this; } - multi_index& operator=(multi_index const& other) + multi_index& operator=(multi_index&& other) noexcept { - this->_base = other._base; - return *this; + this->_base = std::move(other._base); + return *this; } ~multi_index() = default; - auto const& base() const { return _base; } - constexpr auto size() const { return _base.size(); } - constexpr auto at(std::size_t i) const { return _base.at(i); } - constexpr auto operator[](std::size_t i) const { return _base.at(i); } + [[nodiscard]] inline auto const& base() const { return _base; } + [[nodiscard]] inline constexpr auto size() const { return _base.size(); } + [[nodiscard]] inline constexpr auto at(std::size_t i) const { return _base.at(i); } + [[nodiscard]] inline constexpr auto operator[](std::size_t i) const { return _base.at(i); } private: std::array _base; }; template -constexpr auto get(multi_index const& m) { return std::get(m.base()); } +inline constexpr auto get(multi_index const& m) { return std::get(m.base()); } template auto array_to_vector(multi_index const& lhs, multi_index const& rhs) @@ -91,22 +82,17 @@ auto array_to_vector(multi_index const& lhs, multi_index const& rhs) auto pair_of_vector = std::make_pair( vtype {}, vtype{} ); - for(auto i = 0u; i < N; ++i) - for(auto j = 0u; j < M; ++j) + for(auto i = 0ul; i < N; ++i){ + for(auto j = 0ul; j < M; ++j){ if ( lhs.at(i) == rhs.at(j) && lhs.at(i) != boost::numeric::ublas::index::_()){ pair_of_vector.first .push_back( i+1 ); pair_of_vector.second.push_back( j+1 ); } - + } + } return pair_of_vector; } - - - - -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas #endif // MULTI_INDEX_HPP diff --git 
a/include/boost/numeric/ublas/tensor/multi_index_utility.hpp b/include/boost/numeric/ublas/tensor/multi_index_utility.hpp index f4593e1fd..aa98fa4b0 100644 --- a/include/boost/numeric/ublas/tensor/multi_index_utility.hpp +++ b/include/boost/numeric/ublas/tensor/multi_index_utility.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -13,15 +13,12 @@ #define BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP +#include #include #include -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { - +namespace boost::numeric::ublas::detail { template struct has_index_impl; @@ -50,9 +47,12 @@ struct has_index_impl > using next_type = has_index_impl>; static constexpr bool value = has_index_impl::value || next_type::value; }; -} // namespace detail +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas +{ /** @brief has_index is true if index occurs once or more in a multi-index * @@ -69,17 +69,12 @@ struct has_index static constexpr bool value = detail::has_index_impl,std::decay_t>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail { template @@ -108,7 +103,10 @@ struct valid_multi_index_impl> static constexpr bool has_index_value = has_index_type::value && !is_index_zero; static constexpr bool value = !has_index_value && valid_multi_index_impl::value; }; -} // namespace detail +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas +{ /** @brief valid_multi_index is true if indexes occur only once in a multi-index * @@ 
-125,17 +123,13 @@ struct valid_multi_index static constexpr bool value = detail::valid_multi_index_impl>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail +{ template struct number_equal_indexes_impl; @@ -159,9 +153,11 @@ struct number_equal_indexes_impl < std::tuple, std::tuple< static constexpr unsigned v = has_index_value ? 1 : 0; static constexpr unsigned value = v + next_type::value; }; -} // namespace detail +} // namespace boost::numeric::ublas::detail +namespace boost::numeric::ublas { + /** @brief number_equal_indexes contains the number of equal indexes of two multi-indexes * * @note a multi-index represents as tuple of single indexes of type boost::numeric::ublas::index::index_type @@ -182,18 +178,14 @@ struct number_equal_indexes detail::number_equal_indexes_impl< std::decay_t, std::decay_t>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail +{ template @@ -211,10 +203,11 @@ struct index_position_impl < m, m, itype, ttype> static constexpr auto value = std::tuple_size::value; }; -} // namespace detail - +} // namespace boost::numeric::ublas::detail +namespace boost::numeric::ublas +{ /** @brief index_position contains the zero-based index position of an index type within a multi-index * * @note a multi-index represents as tuple of single indexes of type boost::numeric::ublas::index::index_type @@ -235,18 +228,14 @@ struct index_position static constexpr auto value = 
detail::index_position_impl<0ul,std::tuple_size::value,std::decay_t,std::decay_t>::value; }; -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////////////////////////// //////////////////////////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +namespace boost::numeric::ublas::detail +{ template struct index_position_pairs_impl @@ -295,9 +284,10 @@ struct index_position_pairs_impl }; -} // namespace detail - +} // namespace boost::numeric::ublas::detail +namespace boost::numeric::ublas +{ /** @brief index_position_pairs returns zero-based index positions of matching indexes of two multi-indexes * * @note a multi-index represents as tuple of single indexes of type boost::numeric::ublas::index::index_type @@ -321,9 +311,7 @@ auto index_position_pairs(tuple_left const& lhs, tuple_right const& rhs) return array; } -} // namespace ublas -} // namespace numeric -} // namespace boost +} // namespace boost::numeric::ublas //////////////////////////// //////////////////////////// @@ -331,42 +319,40 @@ auto index_position_pairs(tuple_left const& lhs, tuple_right const& rhs) //////////////////////////// -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { +//namespace boost::numeric::ublas::detail +//{ -template -constexpr auto array_to_vector_impl( array_type const& array, [[maybe_unused]] std::index_sequence sq) -{ - return std::make_pair( - std::vector{std::get<0>( std::get(array) )+1 ...} , - std::vector{std::get<1>( std::get(array) )+1 ...} ); -} +//template +//constexpr auto array_to_vector_impl( array_type const& array, std::index_sequence /*unused*/) +//{ +// return std::make_pair( +// std::vector{std::get(array).first +1 ...} , +// std::vector{std::get(array).second +1 ...} ); +//} -} // namespace detail +//} // namespace boost::numeric::ublas::detail -/** @brief array_to_vector converts a std::array of 
zero-based index position pairs into two std::vector of one-based index positions - * - * @code auto two_vectors = array_to_vector(std::make_array ( std::make_pair(1,2), std::make_pair(3,4) ) ) ; - * @endcode - * - * @returns two std::vector of one-based index positions - * - * @param array std::array of zero-based index position pairs -*/ -template -constexpr auto array_to_vector( std::array const& array) -{ - constexpr auto sequence = std::make_index_sequence{}; - return detail::array_to_vector_impl( array, sequence ); -} +//namespace boost::numeric::ublas +//{ +///** @brief array_to_vector converts a std::array of zero-based index position pairs into two std::vector of one-based index positions +// * +// * @code auto two_vectors = array_to_vector(std::make_array ( std::make_pair(1,2), std::make_pair(3,4) ) ) ; +// * @endcode +// * +// * @returns two std::vector of one-based index positions +// * +// * @param array std::array of zero-based index position pairs +//*/ +//template +//constexpr auto array_to_vector( std::array const& array) +//{ +// constexpr auto sequence = std::make_index_sequence{}; +// return detail::array_to_vector_impl( array, sequence ); +//} -} // namespace ublas -} // namespace numeric -} // namespace boost +//} // namespace boost::numeric::ublas -#endif // _BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP_ +#endif // BOOST_UBLAS_TENSOR_MULTI_INDEX_UTILITY_HPP diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index 934dd2d17..6a9c0613b 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -15,11 +15,8 @@ #include -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { -namespace recursive { +namespace boost::numeric::ublas { +namespace detail::recursive { /** @brief Computes the tensor-times-tensor product for q contraction modes @@ -57,34 +54,30 @@ void ttt(SizeType const k, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(k < r) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == na[phia[k]-1]); - for(size_t ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic) - ttt(k+1, r, s, q, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[phib[k-r]-1]); - for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) - ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s+q-1) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); - for(size_t ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) - ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); - for(size_t ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) - *c += *a * *b; + if(k < r) { + assert(nc[k] == na[phia[k]-1]); + for(SizeType ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic) { + ttt(k+1, r, s, q, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); } + } + else if(k < r+s) { + assert(nc[k] == nb[phib[k-r]-1]); + for(SizeType ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) { + ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + 
else if(k < r+s+q-1) { + assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); + for(SizeType ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) { + ttt(k+1, r, s, q, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else { + assert(na[phia[k-s]-1] == nb[phib[k-r]-1]); + for(SizeType ia = 0u; ia < na[phia[k-s]-1]; a += wa[phia[k-s]-1], b += wb[phib[k-r]-1], ++ia) { + *c += *a * *b; + } + } } @@ -124,34 +117,30 @@ void ttt(SizeType const k, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(k < r) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == na[k]); - for(size_t ic = 0u; ic < nc[k]; a += wa[k], c += wc[k], ++ic) - ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[k-r]); - for(size_t ic = 0u; ic < nc[k]; b += wb[k-r], c += wc[k], ++ic) - ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s+q-1) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[k-s] == nb[k-r]); - for(size_t ia = 0u; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) - ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); - } - else - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[k-s] == nb[k-r]); - for(size_t ia = 0u; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) - *c += *a * *b; + if(k < r) { + assert(nc[k] == na[k]); + for(auto ic = 0ul; ic < nc[k]; a += wa[k], c += wc[k], ++ic) { + ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s) { + assert(nc[k] == nb[k-r]); + for(auto ic = 0ul; ic < nc[k]; b += wb[k-r], c += wc[k], ++ic) { + ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s+q-1) { + assert(na[k-s] == nb[k-r]); + for(auto ia = 0ul; ia < na[k-s]; a += wa[k-s], b += 
wb[k-r], ++ia) { + ttt(k+1, r, s, q, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else { + assert(na[k-s] == nb[k-r]); + for(auto ia = 0ul; ia < na[k-s]; a += wa[k-s], b += wb[k-r], ++ia) { + *c += *a * *b; } + } } @@ -181,26 +170,28 @@ void ttm(SizeType const m, SizeType const r, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(r == m) { - ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(r == 0){ - for(auto i0 = 0ul; i0 < nc[0]; c += wc[0], a += wa[0], ++i0) { - auto cm = c; - auto b0 = b; - for(auto i1 = 0ul; i1 < nc[m]; cm += wc[m], b0 += wb[0], ++i1){ - auto am = a; - auto b1 = b0; - for(auto i2 = 0ul; i2 < nb[1]; am += wa[m], b1 += wb[1], ++i2) - *cm += *am * *b1; - } + if(r == m) { + ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); + } + else if(r == 0){ + for(auto i0 = 0ul; i0 < nc[0]; c += wc[0], a += wa[0], ++i0) { + auto cm = c; + auto b0 = b; + for(auto i1 = 0ul; i1 < nc[m]; cm += wc[m], b0 += wb[0], ++i1){ + auto am = a; + auto b1 = b0; + for(auto i2 = 0ul; i2 < nb[1]; am += wa[m], b1 += wb[1], ++i2){ + *cm += *am * *b1; } + } } + } - else{ - for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i) - ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); + else{ + for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i){ + ttm(m, r-1, c, nc, wc, a, na, wa, b, nb, wb); } + } } /** @brief Computes the tensor-times-matrix product for the contraction mode m = 0 @@ -223,31 +214,31 @@ void ttm(SizeType const m, SizeType const r, */ template void ttm0( SizeType const r, - PointerOut c, SizeType const*const nc, SizeType const*const wc, - PointerIn1 a, SizeType const*const na, SizeType const*const wa, - PointerIn2 b, SizeType const*const nb, SizeType const*const wb) + PointerOut c, SizeType const*const nc, SizeType const*const wc, + PointerIn1 a, SizeType const*const na, SizeType const*const wa, + PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(r > 1){ - for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i) - 
ttm0(r-1, c, nc, wc, a, na, wa, b, nb, wb); + if(r > 1){ + for(auto i = 0ul; i < na[r]; c += wc[r], a += wa[r], ++i){ + ttm0(r-1, c, nc, wc, a, na, wa, b, nb, wb); } - else{ - for(auto i1 = 0ul; i1 < nc[1]; c += wc[1], a += wa[1], ++i1) { - auto cm = c; - auto b0 = b; - // r == m == 0 - for(auto i0 = 0ul; i0 < nc[0]; cm += wc[0], b0 += wb[0], ++i0){ - - auto am = a; - auto b1 = b0; - for(auto i2 = 0u; i2 < nb[1]; am += wa[0], b1 += wb[1], ++i2){ - - *cm += *am * *b1; - } - } + } + else{ + for(auto i1 = 0ul; i1 < nc[1]; c += wc[1], a += wa[1], ++i1) { + auto cm = c; + auto b0 = b; + // r == m == 0 + for(auto i0 = 0ul; i0 < nc[0]; cm += wc[0], b0 += wb[0], ++i0){ + + auto am = a; + auto b1 = b0; + for(auto i2 = 0u; i2 < nb[1]; am += wa[0], b1 += wb[1], ++i2){ + *cm += *am * *b1; } + } } + } } @@ -277,25 +268,29 @@ void ttm0( SizeType const r, template void ttv( SizeType const m, SizeType const r, SizeType const q, - PointerOut c, SizeType const*const nc, SizeType const*const wc, - PointerIn1 a, SizeType const*const na, SizeType const*const wa, - PointerIn2 b) + PointerOut c, SizeType const*const nc, SizeType const*const wc, + PointerIn1 a, SizeType const*const na, SizeType const*const wa, + PointerIn2 b) { - if(r == m) { - ttv(m, r-1, q, c, nc, wc, a, na, wa, b); + if(r == m) { + ttv(m, r-1, q, c, nc, wc, a, na, wa, b); + } + else if(r == 0){ + for(auto i0 = 0u; i0 < na[0]; c += wc[0], a += wa[0], ++i0) { + auto c1 = c; + auto a1 = a; + auto b1 = b; + for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im){ + *c1 += *a1 * *b1; + } } - else if(r == 0){ - for(auto i0 = 0u; i0 < na[0]; c += wc[0], a += wa[0], ++i0) { - auto c1 = c; auto a1 = a; auto b1 = b; - for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) - *c1 += *a1 * *b1; - } - } - else{ - for(auto i = 0u; i < na[r]; c += wc[q], a += wa[r], ++i) - ttv(m, r-1, q-1, c, nc, wc, a, na, wa, b); + } + else{ + for(auto i = 0u; i < na[r]; c += wc[q], a += wa[r], ++i){ + ttv(m, r-1, q-1, c, nc, wc, a, na, wa, b); } + } 
} @@ -322,18 +317,21 @@ void ttv0(SizeType const r, PointerIn2 b) { - if(r > 1){ - for(auto i = 0u; i < na[r]; c += wc[r-1], a += wa[r], ++i) - ttv0(r-1, c, nc, wc, a, na, wa, b); + if(r > 1){ + for(auto i = 0u; i < na[r]; c += wc[r-1], a += wa[r], ++i) { + ttv0(r-1, c, nc, wc, a, na, wa, b); } - else{ - for(auto i1 = 0u; i1 < na[1]; c += wc[0], a += wa[1], ++i1) - { - auto c1 = c; auto a1 = a; auto b1 = b; - for(auto i0 = 0u; i0 < na[0]; a1 += wa[0], ++b1, ++i0) - *c1 += *a1 * *b1; - } + } + else{ + for(auto i1 = 0u; i1 < na[1]; c += wc[0], a += wa[1], ++i1) { + auto c1 = c; + auto a1 = a; + auto b1 = b; + for(auto i0 = 0u; i0 < na[0]; a1 += wa[0], ++b1, ++i0){ + *c1 += *a1 * *b1; + } } + } } @@ -354,18 +352,21 @@ void ttv0(SizeType const r, */ template void mtv(SizeType const m, - PointerOut c, [[maybe_unused]] SizeType const*const nc, SizeType const*const wc, - PointerIn1 a, SizeType const*const na, SizeType const*const wa, + PointerOut c, SizeType const*const /*unsed*/, SizeType const*const wc, + PointerIn1 a, SizeType const*const na , SizeType const*const wa, PointerIn2 b) { - // decides whether matrix multiplied with vector or vector multiplied with matrix - const auto o = (m == 0) ? 1 : 0; - - for(auto io = 0u; io < na[o]; c += wc[o], a += wa[o], ++io) { - auto c1 = c; auto a1 = a; auto b1 = b; - for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) - *c1 += *a1 * *b1; + // decides whether matrix multiplied with vector or vector multiplied with matrix + const auto o = (m == 0) ? 
1 : 0; + + for(auto io = 0u; io < na[o]; c += wc[o], a += wa[o], ++io) { + auto c1 = c; + auto a1 = a; + auto b1 = b; + for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) { + *c1 += *a1 * *b1; } + } } @@ -391,28 +392,23 @@ void mtm(PointerOut c, SizeType const*const nc, SizeType const*const wc, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - // C(i,j) = A(i,k) * B(k,j) - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[0] == na[0]); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[1] == nb[1]); - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(na[1] == nb[0]); - - auto cj = c; auto bj = b; - for(auto j = 0u; j < nc[1]; cj += wc[1], bj += wb[1], ++j) { - - auto bk = bj; auto ak = a; - for(auto k = 0u; k < na[1]; ak += wa[1], bk += wb[0], ++k) { - - auto ci = cj; auto ai = ak; - for(auto i = 0u; i < na[0]; ai += wa[0], ci += wc[0], ++i){ - *ci += *ai * *bk; - } - - } - + // C(i,j) = A(i,k) * B(k,j) + assert(nc[0] == na[0]); + assert(nc[1] == nb[1]); + assert(na[1] == nb[0]); + + auto cj = c; auto bj = b; + for(auto j = 0u; j < nc[1]; cj += wc[1], bj += wb[1], ++j) { + auto bk = bj; + auto ak = a; + for(auto k = 0u; k < na[1]; ak += wa[1], bk += wb[0], ++k) { + auto ci = cj; + auto ai = ak; + for(auto i = 0u; i < na[0]; ai += wa[0], ci += wc[0], ++i){ + *ci += *ai * *bk; + } } + } } @@ -438,13 +434,17 @@ value_t inner(SizeType const r, SizeType const*const n, PointerIn2 b, SizeType const*const wb, value_t v) { - if(r == 0) - for(auto i0 = 0u; i0 < n[0]; a += wa[0], b += wb[0], ++i0) - v += *a * *b; - else - for(auto ir = 0u; ir < n[r]; a += wa[r], b += wb[r], ++ir) - v = inner(r-1, n, a, wa, b, wb, v); - return v; + if(r == 0){ + for(auto i0 = 0u; i0 < n[0]; a += wa[0], b += wb[0], ++i0){ + v += *a * *b; + } + } + else{ + for(auto ir = 0u; ir < n[r]; a += wa[r], b += wb[r], ++ir){ + v = inner(r-1, n, a, wa, b, wb, v); + } + } + return v; } @@ 
-454,25 +454,26 @@ void outer_2x2(SizeType const pa, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - // assert(rc == 3); - // assert(ra == 1); - // assert(rb == 1); - - for(auto ib1 = 0u; ib1 < nb[1]; b += wb[1], c += wc[pa+1], ++ib1) { - auto c2 = c; - auto b0 = b; - for(auto ib0 = 0u; ib0 < nb[0]; b0 += wb[0], c2 += wc[pa], ++ib0) { - const auto new_b = *b0; - auto c1 = c2; - auto a1 = a; - for(auto ia1 = 0u; ia1 < na[1]; a1 += wa[1], c1 += wc[1], ++ia1) { - auto a0 = a1; - auto c0 = c1; - for(SizeType ia0 = 0u; ia0 < na[0]; a0 += wa[0], c0 += wc[0], ++ia0) - *c0 = *a0 * new_b; - } + // assert(rc == 3); + // assert(ra == 1); + // assert(rb == 1); + + for(auto ib1 = 0u; ib1 < nb[1]; b += wb[1], c += wc[pa+1], ++ib1) { + auto c2 = c; + auto b0 = b; + for(auto ib0 = 0u; ib0 < nb[0]; b0 += wb[0], c2 += wc[pa], ++ib0) { + const auto new_b = *b0; + auto c1 = c2; + auto a1 = a; + for(auto ia1 = 0u; ia1 < na[1]; a1 += wa[1], c1 += wc[1], ++ia1) { + auto a0 = a1; + auto c0 = c1; + for(SizeType ia0 = 0u; ia0 < na[0]; a0 += wa[0], c0 += wc[0], ++ia0){ + *c0 = *a0 * new_b; } + } } + } } /** @brief Computes the outer product of two tensors @@ -505,14 +506,19 @@ void outer(SizeType const pa, SizeType const ra, PointerIn1 a, SizeType const*const na, SizeType const*const wa, SizeType const rb, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(rb > 1) - for(auto ib = 0u; ib < nb[rb]; b += wb[rb], c += wc[rc], ++ib) - outer(pa, rc-1, c, nc, wc, ra, a, na, wa, rb-1, b, nb, wb); - else if(ra > 1) - for(auto ia = 0u; ia < na[ra]; a += wa[ra], c += wc[ra], ++ia) - outer(pa, rc-1, c, nc, wc, ra-1, a, na, wa, rb, b, nb, wb); - else - outer_2x2(pa, c, nc, wc, a, na, wa, b, nb, wb); //assert(ra==1 && rb==1 && rc==3); + if(rb > 1){ + for(auto ib = 0u; ib < nb[rb]; b += wb[rb], c += wc[rc], ++ib){ + outer(pa, rc-1, c, nc, wc, ra, a, na, wa, rb-1, b, nb, wb); + } + } + else if(ra > 1){ + 
for(auto ia = 0u; ia < na[ra]; a += wa[ra], c += wc[ra], ++ia){ + outer(pa, rc-1, c, nc, wc, ra-1, a, na, wa, rb, b, nb, wb); + } + } + else{ + outer_2x2(pa, c, nc, wc, a, na, wa, b, nb, wb); //assert(ra==1 && rb==1 && rc==3); + } } @@ -551,38 +557,29 @@ void outer(SizeType const k, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - if(k < r) - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == na[phia[k]-1]); - for(size_t ic = 0u; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic) - outer(k+1, r, s, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else if(k < r+s-1) - { - - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[phib[k-r]-1]); - for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) - outer(k+1, r, s, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); - } - else - { - // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay) - assert(nc[k] == nb[phib[k-r]-1]); - for(size_t ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic) - *c = *a * *b; + if(k < r) { + assert(nc[k] == na[phia[k]-1]); + for(auto ic = 0ul; ic < nc[k]; a += wa[phia[k]-1], c += wc[k], ++ic){ + outer(k+1, r, s, phia,phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else if(k < r+s-1) { + assert(nc[k] == nb[phib[k-r]-1]); + for(auto ic = 0ul; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic){ + outer(k+1, r, s, phia, phib, c, nc, wc, a, na, wa, b, nb, wb); + } + } + else { + assert(nc[k] == nb[phib[k-r]-1]); + for(auto ic = 0u; ic < nc[k]; b += wb[phib[k-r]-1], c += wc[k], ++ic){ + *c = *a * *b; } + } } -} // namespace recursive -} // namespace detail -} // namespace ublas -} // namespace numeric -} // namespace boost - - +} // namespace detail::recursive +} // namespace boost::numeric::ublas ////////////////////////////////////////////////////////////////////////////////////////// @@ -598,9 
+595,7 @@ void outer(SizeType const k, #include -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas { /** @brief Computes the tensor-times-vector product * @@ -628,44 +623,53 @@ void ttv(SizeType const m, SizeType const p, const PointerIn1 a, SizeType const*const na, SizeType const*const wa, const PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not pointer types."); - - if( m == 0) - throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero."); - - if( p < m ) - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus."); - - if( p == 0) - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero."); - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers."); - - for(auto i = 0u; i < m-1; ++i) - if(na[i] != nc[i]) - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - - for(auto i = m; i < p; ++i) - if(na[i] != nc[i-1]) - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - - const auto max = std::max(nb[0], nb[1]); - if( na[m-1] != max) - throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); - + static_assert( std::is_pointer::value && std::is_pointer::value & std::is_pointer::value, + "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not pointer types."); + + if( m == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero."); + } + + 
if( p < m ){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus."); + } + if( p == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero."); + } + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers."); + } + for(auto i = 0u; i < m-1; ++i){ + if(na[i] != nc[i]){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); + } + } - if((m != 1) && (p > 2)) - detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b); - else if ((m == 1) && (p > 2)) - detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b); - else if( p == 2 ) - detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b); - else /*if( p == 1 )*/{ - auto v = std::remove_pointer_t>{}; - *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v); + for(auto i = m; i < p; ++i){ + if(na[i] != nc[i-1]){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); } + } + + const auto max = std::max(nb[0], nb[1]); + if( na[m-1] != max){ + throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); + } + + + if((m != 1) && (p > 2)){ + detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b); + } + else if ((m == 1) && (p > 2)){ + detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b); + } + else if( p == 2 ){ + detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b); + } + else /*if( p == 1 )*/{ + auto v = std::remove_pointer_t>{}; + *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v); + } } @@ -699,40 +703,47 @@ void ttm(SizeType const m, SizeType const p, const PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - 
"Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); - - if( m == 0 ) - throw std::length_error("Error in boost::numeric::ublas::ttm: Contraction mode must be greater than zero."); - - if( p < m ) - throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater equal than the specified mode."); - - if( p == 0) - throw std::length_error("Error in boost::numeric::ublas::ttm:Rank must be greater than zero."); - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); - - for(auto i = 0u; i < m-1; ++i) - if(na[i] != nc[i]) - throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); - - for(auto i = m; i < p; ++i) - if(na[i] != nc[i]) - throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); - - if(na[m-1] != nb[1]) - throw std::length_error("Error in boost::numeric::ublas::ttm: 2nd Extent of B and M-th Extent of A must be the equal."); - - if(nc[m-1] != nb[0]) - throw std::length_error("Error in boost::numeric::ublas::ttm: 1nd Extent of B and M-th Extent of C must be the equal."); - - if ( m != 1 ) - detail::recursive::ttm (m-1, p-1, c, nc, wc, a, na, wa, b, nb, wb); - else /*if (m == 1 && p > 2)*/ - detail::recursive::ttm0( p-1, c, nc, wc, a, na, wa, b, nb, wb); - + static_assert( + std::is_pointer::value && + std::is_pointer::value && + std::is_pointer::value); + + if( m == 0 ){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Contraction mode must be greater than zero."); + } + if( p < m ){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Rank must be greater equal than the specified mode."); + } + if( p == 0 ){ + throw std::length_error("Error in boost::numeric::ublas::ttm:Rank must be greater than zero."); + } + if(c == nullptr || a == 
nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); + } + for(auto i = 0u; i < m-1; ++i){ + if(na[i] != nc[i]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); + } + } + for(auto i = m; i < p; ++i){ + if(na[i] != nc[i]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Extents (except of dimension mode) of A and C must be equal."); + } + } + if(na[m-1] != nb[1]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: 2nd Extent of B and M-th Extent of A must be the equal."); + } + if(nc[m-1] != nb[0]){ + throw std::length_error("Error in boost::numeric::ublas::ttm: 1nd Extent of B and M-th Extent of C must be the equal."); + } + + + if ( m != 1 ){ + detail::recursive::ttm (m-1, p-1, c, nc, wc, a, na, wa, b, nb, wb); + } + else{ /*if (m == 1 && p > 2)*/ + detail::recursive::ttm0( p-1, c, nc, wc, a, na, wa, b, nb, wb); + } } @@ -769,39 +780,44 @@ void ttt(SizeType const pa, SizeType const pb, SizeType const q, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); - - if( pa == 0 || pb == 0) - throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); - - if( q > pa && q > pb) - throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); - - - SizeType const r = pa - q; - SizeType const s = pb - q; - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); - - for(auto i = 0ul; i < r; ++i) - if( na[phia[i]-1] != 
nc[i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); - - for(auto i = 0ul; i < s; ++i) - if( nb[phib[i]-1] != nc[r+i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); - - for(auto i = 0ul; i < q; ++i) - if( nb[phib[s+i]-1] != na[phia[r+i]-1] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); - - - if(q == 0ul) - detail::recursive::outer(SizeType{0},r,s, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); - else - detail::recursive::ttt(SizeType{0},r,s,q, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); + + if( pa == 0 || pb == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); + } + + if( q > pa && q > pb) { + throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); + } + + SizeType const r = pa - q; + SizeType const s = pb - q; + + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); + } + for(auto i = 0ul; i < r; ++i){ + if( na[phia[i]-1] != nc[i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); + } + } + for(auto i = 0ul; i < s; ++i){ + if( nb[phib[i]-1] != nc[r+i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); + } + } + for(auto i = 0ul; i < q; ++i){ + if( nb[phib[s+i]-1] != na[phia[r+i]-1] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); + } + } + if(q == 0ul){ + 
detail::recursive::outer(SizeType{0},r,s, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); + } + else{ + detail::recursive::ttt(SizeType{0},r,s,q, phia,phib, c,nc,wc, a,na,wa, b,nb,wb); + } } @@ -836,45 +852,51 @@ void ttt(SizeType const pa, SizeType const pb, SizeType const q, PointerIn1 a, SizeType const*const na, SizeType const*const wa, PointerIn2 b, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); - - if( pa == 0 || pb == 0) - throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); - - if( q > pa && q > pb) - throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); - - - SizeType const r = pa - q; - SizeType const s = pb - q; - SizeType const pc = r+s; - - if(c == nullptr || a == nullptr || b == nullptr) - throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); - - for(auto i = 0ul; i < r; ++i) - if( na[i] != nc[i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); - - for(auto i = 0ul; i < s; ++i) - if( nb[i] != nc[r+i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); - - for(auto i = 0ul; i < q; ++i) - if( nb[s+i] != na[r+i] ) - throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); - - using value_type = std::decay_t; + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::ttm: Argument types for pointers are not pointer types."); + + if( pa == 0 || pb == 0){ + throw std::length_error("Error in boost::numeric::ublas::ttt: tensor order must be greater zero."); + 
} + if( q > pa && q > pb){ + throw std::length_error("Error in boost::numeric::ublas::ttt: number of contraction must be smaller than or equal to the tensor order."); + } + + SizeType const r = pa - q; + SizeType const s = pb - q; + SizeType const pc = r+s; + + if(c == nullptr || a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::ttm: Pointers shall not be null pointers."); + } + for(auto i = 0ul; i < r; ++i){ + if( na[i] != nc[i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and res tensor not correct."); + } + } + for(auto i = 0ul; i < s; ++i){ + if( nb[i] != nc[r+i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of rhs and res not correct."); + } + } + for(auto i = 0ul; i < q; ++i){ + if( nb[s+i] != na[r+i] ){ + throw std::length_error("Error in boost::numeric::ublas::ttt: dimensions of lhs and rhs not correct."); + } + } + using value_type = std::decay_t; - if(q == 0ul) - detail::recursive::outer(pa, pc-1, c,nc,wc, pa-1, a,na,wa, pb-1, b,nb,wb); - else if(r == 0ul && s == 0ul) - *c = detail::recursive::inner(q-1, na, a,wa, b,wb, value_type(0) ); - else - detail::recursive::ttt(SizeType{0},r,s,q, c,nc,wc, a,na,wa, b,nb,wb); + if(q == 0ul){ + detail::recursive::outer(pa, pc-1, c,nc,wc, pa-1, a,na,wa, pb-1, b,nb,wb); + } + else if(r == 0ul && s == 0ul){ + *c = detail::recursive::inner(q-1, na, a,wa, b,wb, value_type(0) ); + } + else{ + detail::recursive::ttt(SizeType{0},r,s,q, c,nc,wc, a,na,wa, b,nb,wb); + } } @@ -900,14 +922,16 @@ auto inner(const SizeType p, SizeType const*const n, const PointerIn2 b, SizeType const*const wb, value_t v) { - static_assert( std::is_pointer::value && std::is_pointer::value, - "Static error in boost::numeric::ublas::inner: Argument types for pointers must be pointer types."); - if(p<2) - throw std::length_error("Error in boost::numeric::ublas::inner: Rank must be greater than zero."); - if(a == nullptr || b == nullptr) - 
throw std::length_error("Error in boost::numeric::ublas::inner: Pointers shall not be null pointers."); + static_assert( std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::inner: Argument types for pointers must be pointer types."); + if(p<2){ + throw std::length_error("Error in boost::numeric::ublas::inner: Rank must be greater than zero."); + } + if(a == nullptr || b == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::inner: Pointers shall not be null pointers."); + } - return detail::recursive::inner(p-1, n, a, wa, b, wb, v); + return detail::recursive::inner(p-1, n, a, wa, b, wb, v); } @@ -936,24 +960,25 @@ void outer(PointerOut c, SizeType const pc, SizeType const*const nc, SizeT const PointerIn1 a, SizeType const pa, SizeType const*const na, SizeType const*const wa, const PointerIn2 b, SizeType const pb, SizeType const*const nb, SizeType const*const wb) { - static_assert( std::is_pointer::value & std::is_pointer::value & std::is_pointer::value, - "Static error in boost::numeric::ublas::outer: argument types for pointers must be pointer types."); - if(pa < 2u || pb < 2u) - throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs and rhs tensor must be equal or greater than two."); - if((pa + pb) != pc) - throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs plus rhs tensor must be equal to the number of extents of C."); - if(a == nullptr || b == nullptr || c == nullptr) - throw std::length_error("Error in boost::numeric::ublas::outer: pointers shall not be null pointers."); - - detail::recursive::outer(pa, pc-1, c, nc, wc, pa-1, a, na, wa, pb-1, b, nb, wb); + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, + "Static error in boost::numeric::ublas::outer: argument types for pointers must be pointer types."); + if(pa < 2u || pb < 2u){ + throw std::length_error("Error in 
boost::numeric::ublas::outer: number of extents of lhs and rhs tensor must be equal or greater than two."); + } + if((pa + pb) != pc){ + throw std::length_error("Error in boost::numeric::ublas::outer: number of extents of lhs plus rhs tensor must be equal to the number of extents of C."); + } + if(a == nullptr || b == nullptr || c == nullptr){ + throw std::length_error("Error in boost::numeric::ublas::outer: pointers shall not be null pointers."); + } + + detail::recursive::outer(pa, pc-1, c, nc, wc, pa-1, a, na, wa, pb-1, b, nb, wb); } -} -} -} +} // namespace boost::numeric::ublas #endif diff --git a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp index c6679a3d7..fa89d431f 100644 --- a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp +++ b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -16,16 +16,14 @@ #include "expression_evaluation.hpp" #include "multi_index_utility.hpp" #include "functions.hpp" -#include +#include "type_traits.hpp" #include #include #include -namespace boost{ -namespace numeric{ -namespace ublas { - +namespace boost::numeric::ublas +{ template class tensor_core; @@ -37,15 +35,14 @@ class matrix_expression; template class vector_expression; -} -} -} +} // namespace boost::numeric::ublas + template inline -constexpr auto operator*( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -53,9 +50,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -63,9 +60,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -73,9 +70,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( 
- boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::vector_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::vector_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -84,9 +81,9 @@ constexpr auto operator/( template inline -constexpr auto operator*( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -94,9 +91,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -104,9 +101,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -114,9 +111,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas 
::detail ::tensor_expression const& lhs, - boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::detail ::tensor_expression const& lhs, + boost ::numeric ::ublas ::matrix_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -125,9 +122,9 @@ constexpr auto operator/( template inline -constexpr auto operator*( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -135,9 +132,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -145,9 +142,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::vector_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -155,9 +152,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas ::vector_expression const& 
lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::vector_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -166,9 +163,9 @@ constexpr auto operator/( template inline -constexpr auto operator*( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator*( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::multiplies<>{}); @@ -176,9 +173,9 @@ constexpr auto operator*( template inline -constexpr auto operator+( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator+( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::plus<>{}); @@ -186,9 +183,9 @@ constexpr auto operator+( template inline -constexpr auto operator-( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator-( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::minus<>{}); @@ -196,9 +193,9 @@ constexpr auto operator-( template inline -constexpr auto operator/( - boost ::numeric ::ublas ::matrix_expression const& lhs, - boost ::numeric 
::ublas ::detail ::tensor_expression const& rhs) noexcept + constexpr auto operator/( + boost ::numeric ::ublas ::matrix_expression const& lhs, + boost ::numeric ::ublas ::detail ::tensor_expression const& rhs) noexcept { return boost ::numeric ::ublas ::detail ::make_binary_tensor_expression( lhs(), rhs(), std::divides<>{}); @@ -207,191 +204,191 @@ constexpr auto operator/( template inline -constexpr auto operator+( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator+( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator+() : LHS tensor and RHS tensor should have the same value type" - ); - - if constexpr( !std::is_same_v ){ - auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); - - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator+() : LHS tensor and RHS tensor should have the same value type" + ); + + if constexpr( !std::is_same_v ){ + auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); + + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l + r; }); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l + r; }); } template inline -constexpr auto operator-( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator-( boost::numeric::ublas::detail::tensor_expression const& lhs, 
boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator-() : LHS tensor and RHS tensor should have the same value type" - ); - if constexpr( !std::is_same_v ){ - auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator-() : LHS tensor and RHS tensor should have the same value type" + ); + + if constexpr( !std::is_same_v ){ + auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l - r; }); -// return boost::numeric::ublas::detail::make_lambda([&lhs,&rhs](std::size_t i){ return lhs(i) - rhs(i);}); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l - r; }); + // return boost::numeric::ublas::detail::make_lambda([&lhs,&rhs](std::size_t i){ return lhs(i) - rhs(i);}); } template inline -constexpr auto operator*( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator*( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator*() : LHS tensor and RHS tensor should have the same value type" - ); - if constexpr( !std::is_same_v ){ - auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); + static_assert( std::is_same_v< typename 
T1::value_type, typename T2::value_type>, + "operator*() : LHS tensor and RHS tensor should have the same value type" + ); - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + if constexpr( !std::is_same_v ){ + auto const& e = boost::numeric::ublas::detail::retrieve_extents(rhs); + + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l * r; }); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), [](auto const& l, auto const& r){ return l * r; }); } template inline -constexpr auto operator/( boost::numeric::ublas::detail::tensor_expression const& lhs, + constexpr auto operator/( boost::numeric::ublas::detail::tensor_expression const& lhs, boost::numeric::ublas::detail::tensor_expression const& rhs) { - - static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, - "operator/() : LHS tensor and RHS tensor should have the same value type" - ); - if constexpr( !std::is_same_v ){ - auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); + static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, + "operator/() : LHS tensor and RHS tensor should have the same value type" + ); + + if constexpr( !std::is_same_v ){ + auto e = boost::numeric::ublas::detail::retrieve_extents(rhs); - if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ - throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); - } + if( !boost::numeric::ublas::detail::all_extents_equal(lhs,e) ){ + throw std::runtime_error("operator+() : LHS tensor and RHS tensor should have equal extents"); } + } - return 
boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), std::divides<>{}); + return boost::numeric::ublas::detail::make_binary_tensor_expression (lhs(), rhs(), std::divides<>{}); } // Overloaded Arithmetic Operators with Scalars template inline -constexpr auto operator+(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator+(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs + r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs + r; }); } template inline -constexpr auto operator-(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator-(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs - r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs - r; }); } template inline -constexpr auto operator*(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator*(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs * r; }); + using 
tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs * r; }); } template inline -constexpr auto operator/(typename boost::numeric::ublas::tensor_core::const_reference lhs, + constexpr auto operator/(typename boost::numeric::ublas::tensor_core::const_reference lhs, boost::numeric::ublas::detail::tensor_expression,R> const& rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs / r; }); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (rhs(), [lhs](auto const& r){ return lhs / r; }); } template inline -constexpr auto operator+(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator+(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l + rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l + rhs; } ); } template inline -constexpr auto operator-(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator-(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l - rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return 
boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l - rhs; } ); } template inline -constexpr auto operator*(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator*(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l * rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l * rhs; } ); } template inline -constexpr auto operator/(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, + constexpr auto operator/(boost::numeric::ublas::detail::tensor_expression,L> const& lhs, typename boost::numeric::ublas::tensor_core::const_reference rhs) noexcept { - using tensor_core_type = boost::numeric::ublas::tensor_core; - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l / rhs; } ); + using tensor_core_type = boost::numeric::ublas::tensor_core; + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), [rhs] (auto const& l) { return l / rhs; } ); } template inline -constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); + return lhs; } template inline -constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator -= (boost::numeric::ublas::tensor_core& 
lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); + return lhs; } template inline -constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); + return lhs; } template inline -constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, const boost::numeric::ublas::detail::tensor_expression,D> &expr) { - boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); + return lhs; } @@ -399,36 +396,36 @@ constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, template inline -constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l+=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l+=r; } ); + return lhs; } template inline -constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l-=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, 
[r](auto& l) { l-=r; } ); + return lhs; } template inline -constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, + constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l*=r; } ); - return lhs; + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l*=r; } ); + return lhs; } template constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { - boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l/=r; } ); + boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l/=r; } ); return lhs; } @@ -438,15 +435,15 @@ constexpr auto& operator /= (boost::numeric::ublas::tensor_core& l template -inline -constexpr auto const& operator +(const boost::numeric::ublas::detail::tensor_expression& lhs) noexcept{ - return lhs; +inline constexpr + auto const& operator +(const boost::numeric::ublas::detail::tensor_expression& lhs) noexcept{ + return lhs; } template -inline -constexpr auto operator -(boost::numeric::ublas::detail::tensor_expression const& lhs) { - return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), std::negate<>{} ); +inline constexpr + auto operator -(boost::numeric::ublas::detail::tensor_expression const& lhs) { + return boost::numeric::ublas::detail::make_unary_tensor_expression (lhs(), std::negate<>{} ); } @@ -459,33 +456,53 @@ constexpr auto operator -(boost::numeric::ublas::detail::tensor_expression template auto operator*( - std::pair< tensor_type_left const&, tuple_type_left > lhs, - std::pair< tensor_type_right const&, tuple_type_right > rhs) + std::pair< tensor_type_left const&, tuple_type_left > lhs, + std::pair< tensor_type_right const&, tuple_type_right > rhs) { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; - auto const& tensor_left = lhs.first; - auto const& 
tensor_right = rhs.first; + auto const& tensor_left = lhs.first; + auto const& tensor_right = rhs.first; - auto multi_index_left = lhs.second; - auto multi_index_right = rhs.second; + auto multi_index_left = lhs.second; + auto multi_index_right = rhs.second; - static constexpr auto num_equal_ind = number_equal_indexes::value; + static constexpr auto num_equal_ind = ublas::number_equal_indexes::value; - if constexpr ( num_equal_ind == 0 ){ - return tensor_left * tensor_right; - } - else if constexpr ( num_equal_ind==std::tuple_size::value && std::is_same::value ){ + if constexpr ( num_equal_ind == 0 ){ + return tensor_left * tensor_right; + } + else if constexpr ( num_equal_ind==std::tuple_size::value && std::is_same::value ){ - return boost::numeric::ublas::inner_prod( tensor_left, tensor_right ); - } - else { - auto array_index_pairs = index_position_pairs(multi_index_left,multi_index_right); - auto index_pairs = array_to_vector( array_index_pairs ); - return boost::numeric::ublas::prod( tensor_left, tensor_right, index_pairs.first, index_pairs.second ); + return ublas::inner_prod( tensor_left, tensor_right ); + } + else { + auto index_pairs = ublas::index_position_pairs(multi_index_left,multi_index_right); + constexpr auto size = std::tuple_size_v; + + using extents_left_type = typename tensor_type_left ::extents_type; + using extents_right_type = typename tensor_type_right::extents_type; + + constexpr bool has_dynamic_extents = ublas::is_dynamic_rank_v || ublas::is_dynamic_rank_v; + + using index_tuple = std::conditional_t, std::array>; + + auto phi_left = index_tuple{}; + auto phi_right = index_tuple{}; + + if constexpr(has_dynamic_extents) { + phi_left .resize(size); + phi_right.resize(size); } + std::transform(index_pairs.begin(), index_pairs.end(), phi_left .begin(), [](auto a){ return a.first +1ul; } ); + std::transform(index_pairs.begin(), index_pairs.end(), phi_right.begin(), [](auto b){ return b.second +1ul; } ); + +// auto index_pairs = 
ublas::array_to_vector( array_index_pairs ); + return ublas::prod( tensor_left, tensor_right, phi_left, phi_right ); + } + } #endif diff --git a/include/boost/numeric/ublas/tensor/operators_comparison.hpp b/include/boost/numeric/ublas/tensor/operators_comparison.hpp index 7516c1731..efc6c7323 100644 --- a/include/boost/numeric/ublas/tensor/operators_comparison.hpp +++ b/include/boost/numeric/ublas/tensor/operators_comparison.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,20 +12,23 @@ #ifndef BOOST_UBLAS_TENSOR_OPERATORS_COMPARISON_HPP #define BOOST_UBLAS_TENSOR_OPERATORS_COMPARISON_HPP -#include -#include -#include #include #include #include #include +#include "extents.hpp" +#include "expression.hpp" +#include "type_traits.hpp" +#include "expression_evaluation.hpp" + namespace boost::numeric::ublas { template class tensor_core; -} +} // namespace boost::numeric::ublas -namespace boost::numeric::ublas::detail { +namespace boost::numeric::ublas::detail +{ template [[nodiscard]] inline @@ -36,7 +39,7 @@ constexpr bool compare(tensor_core const& lhs, tensor_core const& rhs, B "LHS and RHS both should have the same value type" ); - if(lhs.extents() != rhs.extents()){ + if(::operator!=(lhs.extents(),rhs.extents())){ if constexpr(!std::is_same>::value && !std::is_same>::value) throw std::runtime_error( "boost::numeric::ublas::detail::compare(tensor_core const&, tensor_core const&, BinaryPred) : " @@ -95,43 +98,49 @@ constexpr bool compare(tensor_expression const& expr, UnaryPred pred) return compare(T( expr ), pred); } -} +} // namespace boost::numeric::ublas::detail template -[[nodiscard]] inline -constexpr bool operator==( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { 
+[[nodiscard]] inline +constexpr bool operator==( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::equal_to<>{} ); } template [[nodiscard]] inline -constexpr auto operator!=(boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator!=( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::not_equal_to<>{} ); } template [[nodiscard]] inline -constexpr auto operator< ( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator< ( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::less<>{} ); } template [[nodiscard]] inline -constexpr auto operator<=( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator<=( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::less_equal<>{} ); } template [[nodiscard]] inline -constexpr auto operator> ( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator> ( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::greater<>{} ); } template [[nodiscard]] inline -constexpr auto operator>=( 
boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) { +constexpr auto operator>=( + boost::numeric::ublas::detail::tensor_expression const& lhs, + boost::numeric::ublas::detail::tensor_expression const& rhs) { return boost::numeric::ublas::detail::compare( lhs, rhs, std::greater_equal<>{} ); } diff --git a/include/boost/numeric/ublas/tensor/ostream.hpp b/include/boost/numeric/ublas/tensor/ostream.hpp index 1940546a3..2ce7940cc 100644 --- a/include/boost/numeric/ublas/tensor/ostream.hpp +++ b/include/boost/numeric/ublas/tensor/ostream.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2019, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,25 +12,28 @@ #ifndef BOOST_UBLAS_TENSOR_OSTREAM_HPP #define BOOST_UBLAS_TENSOR_OSTREAM_HPP -#include + +#include "extents/extents_functions.hpp" + + #include -#include +#include + -namespace boost { -namespace numeric { -namespace ublas { -namespace detail { + +namespace boost::numeric::ublas::detail +{ template void print(std::ostream& out, value_type const& p) { - out << p << " "; + out << p << " "; } template void print(std::ostream& out, const std::complex& p) { - out << std::real(p) << "+" << std::imag(p) << "i "; + out << std::real(p) << "+" << std::imag(p) << "i "; } @@ -38,95 +41,80 @@ template void print(std::ostream& out, size_type r, const value_type* p, const size_type* w, const size_type* n) { - if(r < 2) + if(r < 2) + { + out << "[ ... " << std::endl; + + for(auto row = 0u; row < n[0]; p += w[0], ++row) // iterate over one column { - out << "[ ... 
" << std::endl; - - for(auto row = 0u; row < n[0]; p += w[0], ++row) // iterate over one column - { - auto const* p1 = p; - for(auto col = 0u; col < n[1]; p1 += w[1], ++col) // iterate over first row - { - print(out,*p1); - } - if(row < n[0]-1) - out << "; " << std::endl; - } - out << "]"; + auto const* p1 = p; + for(auto col = 0u; col < n[1]; p1 += w[1], ++col) // iterate over first row + { + print(out,*p1); + } + if(row < n[0]-1){ + out << "; " << std::endl; + } } - else - { - out << "cat("<< r+1 <<",..." << std::endl; - for(auto d = 0u; d < n[r]-1; p += w[r], ++d){ - print(out, r-1, p, w, n); - out << ",..." << std::endl; - } - print(out, r-1, p, w, n); + out << "]"; + } + else + { + out << "cat("<< r+1 <<",..." << std::endl; + for(auto d = 0u; d < n[r]-1; p += w[r], ++d){ + print(out, r-1, p, w, n); + out << ",..." << std::endl; } - if(r>1) - out << ")"; + print(out, r-1, p, w, n); + } + if(r>1){ + out << ")"; + } } //////////////////////////// -} -} -} -} +} // namespace boost::numeric::ublas::detail -namespace boost { -namespace numeric { -namespace ublas { +namespace boost::numeric::ublas +{ template class tensor_core; -template -class matrix; - -template -class vector; - -} -} -} +} //namespace boost::numeric::ublas template -std::ostream& operator << (std::ostream& out, boost::numeric::ublas::tensor_core const& t) +std::ostream& operator << (std::ostream& out, class boost::numeric::ublas::tensor_core const& t) { - if(is_scalar(t.extents())){ - out << '['; - boost::numeric::ublas::detail::print(out,t[0]); - out << ']'; - } - else if(is_vector(t.extents())) { - const auto& cat = t.extents().at(0) > t.extents().at(1) ? 
';' : ','; - out << '['; - for(auto i = 0u; i < t.size()-1; ++i){ - boost::numeric::ublas::detail::print(out,t[i]); - out << cat << ' '; - } - boost::numeric::ublas::detail::print(out,t[t.size()-1]); - out << ']'; + namespace ublas = boost::numeric::ublas; + + auto const& n = t.extents(); + auto const& w = t.strides(); + + if(is_scalar(n)){ + out << '['; + ublas::detail::print(out,t[0]); + out << ']'; + } + else if(is_vector(n)) { + const auto& cat = n.at(0) > n.at(1) ? ';' : ','; + out << '['; + for(auto i = 0u; i < t.size()-1; ++i){ + ublas::detail::print(out,t[i]); + out << cat << ' '; } - else{ - boost::numeric::ublas::detail::print(out, t.rank()-1, t.data(), t.strides().data(), t.extents().data()); - } - return out; -} - -template - || boost::numeric::ublas::is_extents_v - , int> = 0 -> -std::ostream& operator<<(std::ostream& os, T const& e){ - return os< -#include -#include - -namespace boost::numeric::ublas { - -template class basic_static_extents; - -/** @brief Template class for storing tensor extents for compile time. 
- * - * @code basic_static_extents<1,2,3,4> t @endcode - * @tparam E parameter pack of extents - * - */ -template -class basic_static_extents{ - -public: - - static constexpr auto _size = sizeof...(E); - - using base_type = std::array; - using value_type = typename base_type::value_type; - using size_type = typename base_type::size_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - static_assert( std::numeric_limits::is_integer, "Static error in basic_static_extents: type must be of type integer."); - static_assert(!std::numeric_limits::is_signed, "Static error in basic_static_extents: type must be of type unsigned integer."); - - //@returns the rank of basic_static_extents - [[nodiscard]] inline - constexpr size_type size() const noexcept { return _size; } - - /** - * @param k pos of extent - * @returns the element at given pos - */ - [[nodiscard]] inline - static constexpr const_reference at(size_type k){ - return m_data.at(k); - } - - [[nodiscard]] inline - constexpr const_reference operator[](size_type k) const{ - return m_data[k]; - } - - constexpr basic_static_extents() = default; - - constexpr basic_static_extents(basic_static_extents const&) noexcept = default; - constexpr basic_static_extents(basic_static_extents &&) noexcept = default; - - constexpr basic_static_extents& operator=(basic_static_extents const&) noexcept = default; - constexpr basic_static_extents& operator=(basic_static_extents &&) noexcept = default; - - ~basic_static_extents() = default; - - /** @brief Returns ref to the std::array containing extents */ - [[nodiscard]] inline - constexpr base_type const& base() const noexcept{ - return m_data; - } - - /** @brief Returns pointer to the std::array containing extents */ - 
[[nodiscard]] inline - constexpr const_pointer data() const noexcept{ - return m_data.data(); - } - - /** @brief Checks if extents is empty or not - * - * @returns true if rank is 0 else false - * - */ - [[nodiscard]] inline - constexpr bool empty() const noexcept { return m_data.empty(); } - - [[nodiscard]] inline - constexpr const_reference back() const{ - return m_data.back(); - } - - [[nodiscard]] inline - constexpr const_iterator begin() const noexcept{ - return m_data.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator end() const noexcept{ - return m_data.end(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return m_data.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return m_data.rend(); - } - - /// msvc 14.27 does not consider 'at' function constexpr. - /// To make msvc happy get function is declared - /// and it will be removed when we start using boost.mp11 - template - static constexpr auto get() noexcept{ - static_assert(I < _size, - "boost::numeric::ublas::basic_static_extents::get() : " - "out of bound access" - ); - using element_at = std::tuple_element_t; - return element_at{}; - } - -private: - static constexpr base_type const m_data{E...}; - /// will be removed when we start using boost.mp11 - using tuple_type = std::tuple< std::integral_constant... >; -}; - - -template -using static_extents = basic_static_extents; - -} // namespace boost::numeric::ublas -#endif diff --git a/include/boost/numeric/ublas/tensor/static_strides.hpp b/include/boost/numeric/ublas/tensor/static_strides.hpp deleted file mode 100644 index f43101b8c..000000000 --- a/include/boost/numeric/ublas/tensor/static_strides.hpp +++ /dev/null @@ -1,267 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// -/// \file strides.hpp Definition for the basic_strides template class - -#ifndef BOOST_UBLAS_TENSOR_STATIC_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_STATIC_STRIDES_HPP - -#include -#include -#include - -namespace boost::numeric::ublas{ - - template class basic_static_strides; - -} // boost::numeric::ublas - -namespace boost::numeric::ublas::detail{ - - namespace impl{ - - // concat two static_stride_list togather - // @code using type = typename concat< static_stride_list, static_stride_list >::type @endcode - template - struct concat; - - template - struct concat< basic_static_extents, basic_static_extents > { - using type = basic_static_extents; - }; - - template - using concat_t = typename concat::type; - - // generates static_stride_list containing ones with specific size - template - struct make_sequence_of_ones; - - template - using make_sequence_of_ones_t = typename make_sequence_of_ones::type; - - template - struct make_sequence_of_ones { - using type = concat_t, make_sequence_of_ones_t>; - }; - - template - struct make_sequence_of_ones { - using type = basic_static_extents; - }; - template - struct make_sequence_of_ones{ - using type = basic_static_extents; - }; - - template - struct extents_to_array; - - template - inline static constexpr auto extents_to_array_v = extents_to_array::value; - - template - struct extents_to_array< basic_static_extents > - { - static constexpr std::array const value = {Es...}; - }; - - } // impl - - - template - using make_sequence_of_ones_t = impl::make_sequence_of_ones_t; - - template - constexpr auto make_static_strides_first_order( [[maybe_unused]] E const& e, [[maybe_unused]] basic_static_extents const& res ){ - if constexpr( I >= E::_size - 1ul ){ - return impl::extents_to_array_v< basic_static_extents >; - }else{ - 
using res_type = basic_static_extents; - - constexpr auto prod = E::template get().value * res_type::template get().value; - using nextents = basic_static_extents; - return make_static_strides_first_order(e, nextents{}); - } - } - - template - constexpr auto make_static_strides_last_order( [[maybe_unused]] E const& e, [[maybe_unused]] basic_static_extents const& res ){ - if constexpr( I >= E::_size - 1ul ){ - return impl::extents_to_array_v< basic_static_extents >; - }else{ - using res_type = basic_static_extents; - - constexpr auto J = E::_size - I - 1ul; - constexpr auto K = res_type::_size - I - 1ul; - constexpr auto prod = E::template get().value * res_type::template get().value; - using nextents = basic_static_extents; - return make_static_strides_last_order(e, nextents{}); - } - } - - template - constexpr auto make_static_strides( [[maybe_unused]] E const& e ){ - using value_type = typename E::value_type; - if constexpr( E::_size == 0 ){ - return impl::extents_to_array_v; - }else if constexpr( is_scalar(E{}) || is_vector(E{}) ){ - using extents_with_ones = make_sequence_of_ones_t; - return impl::extents_to_array_v; - }else{ - if constexpr( std::is_same_v ){ - return make_static_strides_first_order(e, basic_static_extents{}); - }else{ - return make_static_strides_last_order(e, basic_static_extents{}); - } - } - } - - // It is use for first order to - // get std::array containing strides - template - inline static constexpr auto strides_helper_v = make_static_strides(ExtentsType{}); - -} // namespace boost::numeric::ublas::detail - -namespace boost::numeric::ublas -{ -/** @brief Partial Specialization for layout::first_order or column_major - * - * @code basic_static_strides, layout::first_order> s @endcode - * - * @tparam R rank of basic_static_extents - * @tparam Extents paramerter pack of extents - * - */ -template -class basic_static_strides, Layout> -{ - -public: - - static constexpr std::size_t const _size = sizeof...(Extents); - - using layout_type = 
Layout; - using extents_type = basic_static_extents; - using base_type = std::array; - using value_type = typename base_type::value_type; - using reference = typename base_type::reference; - using const_reference = typename base_type::const_reference; - using size_type = typename base_type::size_type; - using const_pointer = typename base_type::const_pointer; - using const_iterator = typename base_type::const_iterator; - using const_reverse_iterator = typename base_type::const_reverse_iterator; - - /** - * @param k pos of extent - * @returns the element at given pos - */ - [[nodiscard]] inline - constexpr const_reference at(size_type k) const - { - return m_data.at(k); - } - - [[nodiscard]] inline - constexpr const_reference operator[](size_type k) const { return m_data[k]; } - - //@returns the rank of basic_static_extents - [[nodiscard]] inline - constexpr size_type size() const noexcept { return static_cast(_size); } - - [[nodiscard]] inline - constexpr const_reference back () const{ - return m_data.back(); - } - - // default constructor - constexpr basic_static_strides() noexcept{ - static_assert( - _size == 0 || - ( is_valid(extents_type{}) && - ( is_vector(extents_type{}) || - is_scalar(extents_type{}) || - _size >= 2 - ) - ) - , - "Error in boost::numeric::ublas::basic_static_strides() : " - "Size cannot be 0 or Shape should be valid and shape can be vector or shape can be scalar or size should be greater than" - " or equal to 2" - ); - - - } - - constexpr basic_static_strides(extents_type const& e) noexcept{ (void)e; }; - - // default copy constructor - constexpr basic_static_strides(basic_static_strides const &other) noexcept = default; - constexpr basic_static_strides(basic_static_strides &&other) noexcept = default; - - // default assign constructor - constexpr basic_static_strides & - operator=(basic_static_strides const &other) noexcept = default; - - constexpr basic_static_strides & - operator=(basic_static_strides &&other) noexcept = default; - - 
~basic_static_strides() = default; - - /** @brief Returns ref to the std::array containing extents */ - [[nodiscard]] inline - constexpr auto const& base() const noexcept{ - return m_data; - } - - /** @brief Returns pointer to the std::array containing extents */ - [[nodiscard]] inline - constexpr const_pointer data() const noexcept{ - return m_data.data(); - } - - [[nodiscard]] inline - constexpr const_iterator begin() const noexcept{ - return m_data.begin(); - } - - [[nodiscard]] inline - constexpr const_iterator end() const noexcept{ - return m_data.end(); - } - - [[nodiscard]] inline - constexpr bool empty() const noexcept{ - return m_data.empty(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rbegin() const noexcept - { - return m_data.rbegin(); - } - - [[nodiscard]] inline - constexpr const_reverse_iterator - rend() const noexcept - { - return m_data.rend(); - } - -private: - static constexpr base_type const m_data{ detail::strides_helper_v }; -}; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/strides.hpp b/include/boost/numeric/ublas/tensor/strides.hpp deleted file mode 100644 index eb78c24da..000000000 --- a/include/boost/numeric/ublas/tensor/strides.hpp +++ /dev/null @@ -1,99 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// -/// \file strides.hpp Definition for the basic_strides template class - -#ifndef _BOOST_UBLAS_TENSOR_STRIDES_HPP_ -#define _BOOST_UBLAS_TENSOR_STRIDES_HPP_ - -#include -#include -#include - -namespace boost::numeric::ublas{ - - template && is_strides_v - , int> = 0 - > - [[nodiscard]] inline - constexpr bool operator==(LStrides const& lhs, RStrides const& rhs) noexcept{ - static_assert( std::is_same_v, - "boost::numeric::ublas::operator==(LStrides,RStrides) : LHS value type should be the same as the RHS value type"); - - return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); - } - - template && is_strides_v - , int> = 0 - > - [[nodiscard]] inline - constexpr bool operator!=(LStrides const& lhs, RStrides const& rhs) noexcept{ - static_assert( std::is_same_v, - "boost::numeric::ublas::operator!=(LStrides,RStrides) : LHS value type should be the same as the RHS value type"); - return !( lhs == rhs ); - } - -} // namespace boost::numeric::ublas - - -namespace boost::numeric::ublas::detail { - - /** @brief Returns relative memory index with respect to a multi-index - * - * @code auto j = access(std::vector{3,4,5}, strides{shape{4,2,3},first_order}); @endcode - * - * @param[in] i multi-index of length p - * @param[in] w stride vector of length p - * @returns relative memory location depending on \c i and \c w - */ - template - [[nodiscard]] inline - constexpr auto access(std::vector const& i, Stride const& w) - { - static_assert( is_strides_v, - "boost::numeric::ublas::detail::access() : invalid type, the type should be a strides"); - - using value_type = typename Stride::value_type; - return std::inner_product(i.begin(), i.end(), w.begin(), value_type{}); - } - - /** @brief Returns relative memory index with respect to a multi-index - 
* - * @code auto j = access(strides{shape{4,2,3},first_order}, 2,3,4); @endcode - * - * @param[in] is the elements of the partial multi-index - * @param[in] sum the current relative memory index - * @returns relative memory location depending on \c i and \c w - */ - template - [[nodiscard]] inline - constexpr auto access(Stride const& w, Indices ... is) - { - static_assert( is_strides_v, - "boost::numeric::ublas::detail::access() : invalid type, the type should be a strides"); - - if constexpr( is_static_rank_v ){ - static_assert( Stride::_size >= sizeof...(is), - "boost::numeric::ublas::detail::access() : number of indices exceeds the size of the stride"); - } - - using value_type = typename Stride::value_type; - std::array i = {is...}; - return std::inner_product(i.begin(), i.end(), w.begin(), value_type{}); - } - -} // namespace boost::numeric::ublas::detail - -#endif diff --git a/include/boost/numeric/ublas/tensor/tags.hpp b/include/boost/numeric/ublas/tensor/tags.hpp index 079c2e783..7774f9ccb 100644 --- a/include/boost/numeric/ublas/tensor/tags.hpp +++ b/include/boost/numeric/ublas/tensor/tags.hpp @@ -1,17 +1,15 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany // -#ifndef BOOST_UBLAS_TENSOR_TAGS_IMPL_HPP -#define BOOST_UBLAS_TENSOR_TAGS_IMPL_HPP +#ifndef BOOST_UBLAS_TENSOR_TAGS_HPP +#define BOOST_UBLAS_TENSOR_TAGS_HPP namespace boost::numeric::ublas{ @@ -25,7 +23,7 @@ namespace boost::numeric::ublas{ struct storage_non_seq_container_tag{}; -} // namespace boost::numeric::ublas::tag +} // namespace boost::numeric::ublas -#endif +#endif // BOOST_UBLAS_TENSOR_TAGS_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor.hpp b/include/boost/numeric/ublas/tensor/tensor.hpp index f635f4fbe..02ceaa53a 100644 --- a/include/boost/numeric/ublas/tensor/tensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,46 +10,13 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#ifndef BOOST_UBLAS_TENSOR_IMPL_HPP -#define BOOST_UBLAS_TENSOR_IMPL_HPP +#ifndef BOOST_UBLAS_TENSOR_TENSOR_HPP +#define BOOST_UBLAS_TENSOR_TENSOR_HPP -#include -#include +#include "tensor/tensor_core.hpp" +#include "tensor/tensor_dynamic.hpp" +#include "tensor/tensor_engine.hpp" +#include "tensor/tensor_static_rank.hpp" +#include "tensor/tensor_static.hpp" -namespace boost::numeric::ublas{ - - template - using dynamic_tensor = tensor_core< - tensor_engine< - extents<>, - Layout, - strides< extents<> >, - std::vector< ValueType, std::allocator > - > - >; - - - template - using static_tensor = tensor_core< - tensor_engine< - ExtentsType, - Layout, - strides, - std::array< ValueType, product(ExtentsType{}) > - > - >; - - template - using fixed_rank_tensor = tensor_core< - tensor_engine< - extents, - Layout, - strides< extents >, - std::vector< ValueType, std::allocator > - > - >; - -} // namespace boost::numeric::ublas - - -#endif +#endif // BOOST_UBLAS_TENSOR_TENSOR_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp new file mode 100644 index 000000000..43591af63 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_core.hpp @@ -0,0 +1,27 @@ +// +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_CORE_HPP +#define BOOST_UBLAS_TENSOR_CORE_HPP + + +namespace boost::numeric::ublas { + +template +class tensor_core; + +} // namespace boost::numeric::ublas + +#endif // BOOST_UBLAS_TENSOR_CORE_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp new file mode 100644 index 000000000..ec27296a6 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -0,0 +1,466 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_DYNAMIC_HPP +#define BOOST_UBLAS_TENSOR_DYNAMIC_HPP + +#include + +#include "../algorithms.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../concepts.hpp" + +#include "tensor_engine.hpp" + + +namespace boost::numeric::ublas { + +template +using engine_tensor_dynamic = tensor_engine, L, std::vector>; + +template + class tensor_core> + : public detail::tensor_expression< + tensor_core>, + tensor_core>> +{ +public: + using engine_type = engine_tensor_dynamic; + using self_type = tensor_core; + + template + using tensor_expression_type = 
detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = typename container_traits_type::reference; + using const_reference = typename container_traits_type::const_reference; + + using pointer = typename container_traits_type::pointer; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename container_traits_type::iterator; + using const_iterator = typename container_traits_type::const_iterator; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using matrix_type = matrix >; + using vector_type = vector >; + + explicit tensor_core () = default; + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor{3,4,2}; @endcode + * + */ + template + explicit inline tensor_core (Is ... 
is) + : tensor_expression_type{} + , _extents{size_type(is)...} + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + } + + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor(extents{3,4,2}); @endcode + * + */ + explicit inline tensor_core (extents_type e) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + } + + /** @brief Constructs a tensor_core with a \c shape and initial value + * + * @code auto t = tensor(extents<>{4,3,2},5); @endcode + * + * @param i initial tensor_core with this value + */ + inline tensor_core (extents_type e, value_type i) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(to_strides(_extents,layout_type{})) + , _container(product(_extents),i) + { + } + + /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data + * + * @code auto t = tensor(extents<>{3,4,2},std::vector(3*4*2,1.f)); @endcode + * + * @param e instance of \c extents<> specifying the dimensions of tensor + * @param a instance of \c std::vector to be copied + */ + inline tensor_core (extents_type e, container_type a) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(std::move(a)) + { + if(std::size(_container) != ublas::product(_extents)){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core: " + "Cannot construct tensor with specified std::vector instance. " + "Number of extents and std::vector size do not match."); + } + } + + + /** @brief Constructs a tensor_core with another tensor_core with a different layout + * + * @param other tensor_core with a different layout to be copied. 
+ */ + template + explicit inline tensor_core (const tensor_core &other) + : tensor_expression_type{} + , _extents (ublas::begin(other.extents ()), ublas::end (other.extents ())) + , _strides (ublas::begin(other.extents ()), ublas::end (other.extents ())) + , _container( std::begin(other.container()), std::end (other.container())) + { + } + + + /** @brief Constructs a tensor_core with an tensor_core expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note type must be specified of tensor_core must be specified. + * @note dimension extents are extracted from tensors within the expression. + * + * @param expr tensor_core expression + * @param size tensor_core expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (detail::tensor_expression const& expr) + : tensor_expression_type{} + , _extents (ublas::detail::retrieve_extents(expr)) + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + detail::eval(*this, expr); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core( matrix_type const& m ) + : tensor_expression_type{} + , _extents {m.size1(),m.size2()} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(m.data().begin(), m.data().end()) + { + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core (vector_type const& v) + : tensor_expression_type{} + , _extents {v.size(),1} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(v.data().begin(), v.data().end()) + { + } + + /** @brief Constructs a tensor_core with a matrix expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. 
+ * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const matrix_expression_type &expr) + : tensor_core(matrix_type(expr)) + { + } + + /** @brief Constructs a tensor_core with a vector expression + * + * @code tensor_core A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const vector_expression_type &expr) + : tensor_core( vector_type ( expr ) ) + { + } + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + inline tensor_core (const tensor_core &t) + : tensor_expression_type{} + , _extents (t._extents ) + , _strides (t._strides ) + , _container(t._container) + {} + + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be moved. + */ + inline tensor_core (tensor_core &&t) noexcept + : tensor_expression_type{} + , _extents (std::move(t._extents )) + , _strides (std::move(t._strides )) + , _container(std::move(t._container)) + {} + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. 
+ */ + template + tensor_core &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + tensor_core& operator=(const_reference v) + { + std::fill_n(_container.begin(), _container.size(), v); + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference at (I1 i1, I2 i2, Is ... is) const + { + if(sizeof...(is)+2 != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core::at : " + "Cannot access tensor with multi-index. " + "Number of provided indices does not match with tensor order."); + } + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference at (I1 i1, I2 i2, Is ... is) + { + if(sizeof...(is)+2 != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core::at : " + "Cannot access tensor with multi-index." 
+ "Number of provided indices does not match with tensor order."); + } + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference operator()(Is ... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference operator()(Is ... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference operator [] (size_type i) const { + return this->_container[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline reference operator [] (size_type i) { + return this->_container[i]; + } + + /** @brief Element access using a single-index with bound checking which can throw an exception. 
+ * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + template + [[nodiscard]] inline const_reference at (size_type i) const { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline reference at (size_type i) { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto size = sizeof...(ps)+1; + if(size != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core : " + "Cannot multiply using Einstein notation. " + "Number of provided indices does not match with tensor order."); + } + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... 
) ); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents); + std::swap(lhs._strides , rhs._strides); + std::swap(lhs._container , rhs._container); + } + + + [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } + [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline auto rank () const { return _extents.size(); } + [[nodiscard]] inline auto order () const { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } + [[nodiscard]] inline 
auto data () const noexcept -> const_pointer { return _container.data();} + [[nodiscard]] inline auto data () noexcept -> pointer { return _container.data();} + [[nodiscard]] inline auto const& base () const noexcept { return _container; } + +private: + extents_type _extents; + strides_type _strides; + container_type _container; +}; + +} // namespace boost::numeric::ublas + +namespace boost::numeric::ublas{ +template +using tensor_dynamic = tensor_core, L, std::vector>>; +} // namespace boost::numeric::ublas + + +#endif + diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp new file mode 100644 index 000000000..dc6cbd790 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp @@ -0,0 +1,29 @@ +// +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#ifndef BOOST_UBLAS_TENSOR_ENGINE_HPP +#define BOOST_UBLAS_TENSOR_ENGINE_HPP + +namespace boost::numeric::ublas{ + +template +struct tensor_engine +{ + using extents_type = E; + using layout_type = L; + using container_type = C; +}; + +} // namespace boost::numeric::ublas + + +#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp new file mode 100644 index 000000000..644ed9c51 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp @@ -0,0 +1,456 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_STATIC_HPP +#define BOOST_UBLAS_TENSOR_STATIC_HPP + +#include + +#include "../algorithms.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../concepts.hpp" + +#include "tensor_engine.hpp" + + + + +namespace boost::numeric::ublas::detail +{ +template +using engine_tensor_static = tensor_engine< + extents, L, std::array>> >; +} // namespace boost::numeric::ublas::detail + +namespace boost::numeric::ublas { +template +class tensor_core> + : public detail::tensor_expression< + tensor_core>, + tensor_core>> +{ +public: + using engine_type = detail::engine_tensor_static; + using self_type = tensor_core; + + template + using tensor_expression_type = detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = typename container_traits_type::reference; + using const_reference = typename 
container_traits_type::const_reference; + + using pointer = typename container_traits_type::pointer; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename container_traits_type::iterator; + using const_iterator = typename container_traits_type::const_iterator; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using matrix_type = matrix >; + using vector_type = vector >; + + static_assert(std::tuple_size_v == ublas::product_v); + static_assert(0ul != ublas::product_v); + + /** @brief Constructs a tensor_core. + * + */ + constexpr inline tensor_core () noexcept = default; + + /** @brief Constructs a tensor_core with a \c shape + * + * + * @code tensor> A(4); @endcode + * + * @param v value with which tensor_core is initialized + */ + constexpr explicit inline tensor_core (value_type v) + : tensor_core() + { + std::fill_n(begin(),this->size(),v); + } + + /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data + * + * @code auto a = tensor>(array); @endcode + * + * @param s initial tensor_core dimension extents + * @param a container of \c array_type that is copied according to the storage layout + */ + constexpr explicit inline tensor_core (container_type a) noexcept + : tensor_expression_type{} + , _container{std::move(a)} + { + } + + + /** @brief Constructs a tensor_core with another tensor_core with a different layout + * + * @param other tensor_core with a different layout to be copied. 
+ */ + template + explicit inline tensor_core (const tensor_core &other) + : tensor_expression_type{} + , _container{} + { + if(_extents != other.extents()){ + throw std::invalid_argument("error in boost::numeric::ublas::tensor_core: extents do not match."); + } + + ublas::copy(this->rank(), this->extents().data(), + this->data(), this->strides().data(), + other.data(), other.strides().data()); + + } + + + /** @brief Constructs a tensor_core with an tensor_core expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note type must be specified of tensor_core must be specified. + * @note dimension extents are extracted from tensors within the expression. + * + * @param expr tensor_core expression + * @param size tensor_core expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const detail::tensor_expression &expr) + : tensor_expression_type{} + , _container{} + { + detail::eval(*this, expr); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core( matrix_type const& m ) + { + static_assert(is_matrix_v); + if(m.size1() != std::get<0>(_extents) || m.size2() != std::get<1>(_extents) ){ + throw std::invalid_argument("error in boost::numeric::ublas::tensor_core: matrix and tensor dimensions do not match."); + } + std::copy(m.data().begin(), m.data().end(), this->begin()); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + constexpr explicit tensor_core (vector_type const& v) + { + static_assert(is_vector_v); + + if(v.size() != std::get<0>(_extents) && v.size() != std::get<1>(_extents) ){ + throw std::invalid_argument("error in boost::numeric::ublas::tensor_core: matrix and tensor dimensions do not match."); + } + std::copy(v.data().begin(), v.data().end(), this->begin()); + } + + /** @brief Constructs a tensor_core with a matrix expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. 
+ * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const matrix_expression_type &expr) + : tensor_core(matrix_type(expr)) + { + } + + /** @brief Constructs a tensor_core with a vector expression + * + * @code tensor_core A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const vector_expression_type &expr) + : tensor_core( vector_type ( expr ) ) + { + } + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + constexpr inline tensor_core (const tensor_core &t) noexcept + : tensor_expression_type{} + , _container{t._container} + {} + + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be moved. + */ + constexpr inline tensor_core (tensor_core &&t) noexcept + : tensor_expression_type{} + , _container (std::move(t._container)) + {} + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. 
+ */ + template + tensor_core &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + constexpr tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + constexpr tensor_core& operator=(const_reference v) noexcept + { + std::fill_n(this->_container.begin(), this->_container.size(), v); + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference at (I1 i1, I2 i2, Is ... is) const + { + static_assert (sizeof...(is)+2 == ublas::size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is... ); + return _container[idx]; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference at (I1 i1, I2 i2, Is ... is) + { + static_assert (sizeof...(is)+2 == ublas::size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is... ); + return _container[idx]; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. 
+ * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr const_reference operator()(Is ... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr reference operator()(Is ... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline constexpr const_reference operator [] (size_type i) const { + return this->_container[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline constexpr reference operator [] (size_type i) { + return this->_container[i]; + } + + /** @brief Element access using a single-index with bound checking which can throw an exception. 
+ * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline constexpr const_reference at (size_type i) const { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline constexpr reference at (size_type i) { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto size = sizeof...(ps)+1; + static_assert(size == ublas::size_v); + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... 
) ); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._container, rhs._container); + } + + + [[nodiscard]] inline constexpr auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline constexpr auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline constexpr auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline constexpr auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline constexpr auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline constexpr auto cend () const noexcept -> const_iterator { return _container.cend (); } + [[nodiscard]] inline constexpr auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline constexpr auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline constexpr auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline constexpr auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline constexpr auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline constexpr auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + + [[nodiscard]] inline constexpr auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline constexpr auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline constexpr auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline constexpr auto rank () const noexcept { return ublas::size_v; } + [[nodiscard]] inline constexpr auto order () const noexcept { return this->rank(); } + + [[nodiscard]] constexpr inline auto const& strides () const noexcept{ return 
_strides; } + [[nodiscard]] inline constexpr auto const& extents () const noexcept{ return _extents; } + [[nodiscard]] inline constexpr const_pointer data () const noexcept{ return _container.data();} + [[nodiscard]] inline constexpr pointer data () noexcept{ return _container.data();} + [[nodiscard]] inline constexpr auto const& base () const noexcept{ return _container; } + + + + +private: + static constexpr extents_type _extents = extents_type{}; + static constexpr strides_type _strides = to_strides_v; + container_type _container; +}; + + + +//template +//static constexpr inline auto make_tensor( +// typename tensor_static::base_type && a, +// typename tensor_static::extents_type && /*unused*/, +// typename tensor_static::layout_type && /*unused*/) +//{ +// return tensor_static( a ); +//} + + +} // namespace boost::numeric::ublas + + +namespace boost::numeric::ublas{ + +template +using tensor_static = tensor_core>>>; + +} + +namespace boost::numeric::ublas::experimental +{ +template +using matrix_static = tensor_static, L>; + +template +using vector_static = tensor_static, L>; +} // namespace boost::numeric::ublas::experimental + +#endif + diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp new file mode 100644 index 000000000..fbb5074db --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -0,0 +1,473 @@ +// +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +/// \file tensor_core.hpp Definition for the tensor template class + +#ifndef BOOST_UBLAS_TENSOR_STATIC_RANK_HPP +#define BOOST_UBLAS_TENSOR_STATIC_RANK_HPP + +#include + +#include "../algorithms.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../type_traits.hpp" +#include "../tags.hpp" +#include "../concepts.hpp" + +#include "tensor_engine.hpp" + + + +namespace boost::numeric::ublas { + +template +using engine_tensor_static_rank = tensor_engine, L, std::vector>; + +template + class tensor_core> + : public detail::tensor_expression< + tensor_core>, + tensor_core>> +{ +public: + using engine_type = engine_tensor_static_rank; + using self_type = tensor_core; + + template + using tensor_expression_type = detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = typename container_traits_type::reference; + using const_reference = typename container_traits_type::const_reference; + + using pointer = typename container_traits_type::pointer; + using const_pointer = 
typename container_traits_type::const_pointer; + + using iterator = typename container_traits_type::iterator; + using const_iterator = typename container_traits_type::const_iterator; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using matrix_type = matrix >; + using vector_type = vector >; + + tensor_core () = default; + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor(extents<3>{3,4,2}); @endcode + * + */ + explicit inline tensor_core (extents_type e) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + } + + /** @brief Constructs a tensor_core with a \c shape + * + * @code auto t = tensor{3,4,2}; @endcode + * + */ + template + explicit inline tensor_core (Is ... is) + : tensor_core(extents_type{size_type(is)...}) + { + } + + /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data + * + * @code auto t = tensor(extents<>{3,4,2},std::vector(3*4*2,1.f)); @endcode + * + * @param e instance of \c extents<> specifying the dimensions of tensor + * @param a instance of \c std::vector to be copied + */ + inline tensor_core (extents_type e, container_type a) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _container(std::move(a)) + { + if(std::size(_container) != ublas::product(_extents)){ + throw std::length_error("boost::numeric::ublas::tensor_static_rank : " + "Cannot construct tensor with specified container and extents. 
" + "Number of container elements do not match with the specified extents."); + } + } + + /** @brief Constructs a tensor_core with another tensor_core with a different layout + * + * @param other tensor_core with a different layout to be copied. + */ + template + explicit inline tensor_core (const tensor_core &other) + : tensor_expression_type{} + , _extents (ublas::begin(other.extents()),ublas::end (other.extents ())) + , _strides (ublas::to_strides(_extents)) + , _container(std::begin(other.container()),std::end (other.container())) + { + } + + + /** @brief Constructs a tensor_core with an tensor_core expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note type must be specified of tensor_core must be specified. + * @note dimension extents are extracted from tensors within the expression. + * + * @param expr tensor_core expression + * @param size tensor_core expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (detail::tensor_expression const& expr) + : tensor_expression_type{} + , _extents (ublas::detail::retrieve_extents(expr)) + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(ublas::product(_extents)) + { + detail::eval(*this, expr); + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core( matrix_type const& m ) + : tensor_expression_type{} + , _extents {m.size1(),m.size2()} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(m.data().begin(), m.data().end()) + { + } + + // NOLINTNEXTLINE(hicpp-explicit-conversions) + explicit tensor_core (vector_type const& v) + : tensor_expression_type{} + , _extents {v.size(),1} + , _strides (ublas::to_strides(_extents,layout_type{})) + , _container(v.data().begin(), v.data().end()) + { + } + + /** @brief Constructs a tensor_core with a matrix expression + * + * @code tensor_core A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. 
+ * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const matrix_expression_type &expr) + : tensor_core(matrix_type(expr)) + { + } + + /** @brief Constructs a tensor_core with a vector expression + * + * @code tensor_core A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + template + // NOLINTNEXTLINE(hicpp-explicit-conversions) + inline tensor_core (const vector_expression_type &expr) + : tensor_core( vector_type ( expr ) ) + { + } + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + inline tensor_core (const tensor_core &t) noexcept + : tensor_expression_type{} + , _extents (t._extents ) + , _strides (t._strides ) + , _container(t._container) + {} + + + + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be moved. + */ + inline tensor_core (tensor_core &&t) noexcept + : tensor_expression_type{} + , _extents (std::move(t._extents )) + , _strides (std::move(t._strides )) + , _container(std::move(t._container)) + {} + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. 
+ */ + template + tensor_core &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + tensor_core& operator=(container_type c) + { + if( c.size() != this->size()){ + throw std::length_error("boost::numeric::ublas::tensor_core: " + "Cannot assign provided container to tensor." + "Number of elements do not match."); + } + _container = std::move(c); + return *this; + } + + tensor_core& operator=(const_reference v) + { + std::fill_n(_container.begin(), _container.size(), v); + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference at (I1 i1, I2 i2, Is ... is) const + { + static_assert (sizeof...(is)+2 == std::tuple_size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference at (I1 i1, I2 i2, Is ... 
is) + { + static_assert (sizeof...(Is)+2 == std::tuple_size_v); + const auto idx = ublas::detail::to_index(_strides,i1,i2,is...); + return _container.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline const_reference operator()(Is ... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline reference operator()(Is ... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference operator [] (size_type i) const { + return this->_container[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline reference operator [] (size_type i) { + return this->_container[i]; + } + + /** @brief Element access using a single-index with bound checking which can throw an exception. 
+ * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference at (size_type i) const { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline reference at (size_type i) { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + [[nodiscard]] inline constexpr decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto size = sizeof...(index_types)+1; + static_assert(size == std::tuple_size_v); + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... 
) ); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents ); + std::swap(lhs._strides , rhs._strides ); + std::swap(lhs._container , rhs._container); + } + + + [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } + + [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline constexpr auto rank () const noexcept { return std::tuple_size_v; } + [[nodiscard]] inline constexpr auto order () const noexcept { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& extents () const noexcept 
{ return _extents; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _container.data();} + [[nodiscard]] inline auto data () noexcept -> pointer { return _container.data();} + [[nodiscard]] inline auto const& base () const noexcept { return _container; } + + +private: + extents_type _extents; + strides_type _strides; + container_type _container; +}; + +/** @brief Type for create a dynamic tensor instance with dynamic non-resizable extents + * + * @code + * // defines a 4-dimensional tensor type + * // tensor_core,layout::first_order,std::vector>> + * + * using ftensor = tensor_mixed; + * + * // instantiates a 4-dimension + * auto t = ftensor{{5,6,4,3}}; + * + * @endcode + * + * */ + +template +using tensor_static_rank = tensor_core>; + +} // namespace boost::numeric::ublas + + +namespace boost::numeric::ublas::experimental +{ + +template +using matrix = tensor_core>; + +template +using vector = tensor_core>; + +} // namespace boost::numeric::ublas::experimental + + +#endif // BOOST_UBLAS_TENSOR_STATIC_RANK_HPP + diff --git a/include/boost/numeric/ublas/tensor/tensor_core.hpp b/include/boost/numeric/ublas/tensor/tensor_core.hpp deleted file mode 100644 index 609c9e15e..000000000 --- a/include/boost/numeric/ublas/tensor/tensor_core.hpp +++ /dev/null @@ -1,886 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -/// \file tensor_core.hpp Definition for the tensor template class - -#ifndef BOOST_UBLAS_TENSOR_CORE_IMPL_HPP -#define BOOST_UBLAS_TENSOR_CORE_IMPL_HPP - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace boost::numeric::ublas { - -template< class T > -class tensor_core: - public detail::tensor_expression< tensor_core,tensor_core > -{ - - using self_type = tensor_core; - -public: - using tensor_traits = T; - - template - using tensor_expression_type = detail::tensor_expression; - - template - using matrix_expression_type = matrix_expression; - - template - using vector_expression_type = vector_expression; - - using super_type = tensor_expression_type; - using storage_traits_type = typename tensor_traits::storage_traits_type; - - using array_type = typename storage_traits_type::array_type; - using layout_type = typename tensor_traits::layout_type; - - - using size_type = typename storage_traits_type::size_type; - using difference_type = typename storage_traits_type::difference_type; - using value_type = typename storage_traits_type::value_type; - - using reference = typename storage_traits_type::reference; - using const_reference = typename storage_traits_type::const_reference; - - using pointer = typename storage_traits_type::pointer; - using const_pointer = typename storage_traits_type::const_pointer; - - using iterator = typename storage_traits_type::iterator; - using const_iterator = typename storage_traits_type::const_iterator; - - using reverse_iterator = typename storage_traits_type::reverse_iterator; - using const_reverse_iterator = typename storage_traits_type::const_reverse_iterator; - - using tensor_temporary_type = self_type; - using storage_category = dense_tag; - using 
container_tag = typename storage_traits_type::container_tag; - using resizable_tag = typename storage_traits_type::resizable_tag; - - using extents_type = typename tensor_traits::extents_type; - using strides_type = typename tensor_traits::strides_type; - - using matrix_type = matrix >; - using vector_type = vector >; - - /** @brief Constructs a tensor_core. - * - * @note the tensor_core is empty. - * @note the tensor_core needs to reshaped for further use. - * - */ - inline - constexpr tensor_core () - { - if constexpr( is_static_v ){ - auto temp = tensor_core(extents_type{},resizable_tag{}); - swap(*this,temp); - } - } - - constexpr tensor_core( extents_type e, [[maybe_unused]] storage_resizable_container_tag t ) - : tensor_expression_type() - , extents_(std::move(e)) - , strides_(extents_) - , data_( product(extents_) ) - {} - - constexpr tensor_core( extents_type e, [[maybe_unused]] storage_static_container_tag t ) - : tensor_expression_type() - , extents_(std::move(e)) - , strides_(extents_) - { - if ( data_.size() < product(extents_) ){ - throw std::length_error("boost::numeric::ublas::tensor_core(extents_type const&, storage_static_container_tag): " - "size of requested storage exceeds the current container size" - ); - } - } - - /** @brief Constructs a tensor_core with an initializer list for dynamic_extents - * - * By default, its elements are initialized to 0. - * - * @code tensor_core A{4,2,3}; @endcode - * - * @param l initializer list for setting the dimension extents of the tensor_core - */ - template> - > - explicit inline - tensor_core (std::initializer_list l) - : tensor_core( std::move( extents_type( std::move(l) ) ), resizable_tag{} ) - {} - - /** @brief Constructs a tensor_core with a \c shape - * - * By default, its elements are initialized to 0. 
- * - * @code tensor_core A{extents{4,2,3}}; @endcode - * - * @param s initial tensor_core dimension extents - */ - template> - > - explicit inline - tensor_core (extents_type s) - : tensor_core( std::move(s), resizable_tag{} ) - {} - - /** @brief Constructs a tensor_core with a \c shape - * - * By default, its elements are initialized to 0. - * - * @code tensor_core A{extents{4,2,3}}; @endcode - * - * @param s initial tensor_core dimension extents - * @param i initial tensor_core with this value - */ - template> - > - explicit inline - tensor_core (extents_type s, value_type const& i) - : tensor_core( std::move(s), resizable_tag{} ) - { - std::fill(begin(),end(),i); - } - - /** @brief Constructs a tensor_core with a \c shape - * - * By default, its elements are initialized to 0. - * - * @code tensor_core A{}; @endcode - * - * @param i initial tensor_core with this value - */ - template> - > - explicit inline - tensor_core (value_type const& i) - : tensor_core() - { - std::fill(begin(),end(),i); - } - - /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data - * - * @code tensor_core A{extents{4,2,3}, array }; @endcode - * - * - * @param s initial tensor_core dimension extents - * @param a container of \c array_type that is copied according to the storage layout - */ - template> - > - inline - tensor_core (extents_type s, const array_type &a) - : tensor_core( std::move(s), resizable_tag{} ) - { - if( size() != a.size() ){ - throw std::runtime_error("boost::numeric::ublas::tensor_core(extents_type,array_type): " - "array size mismatch with extents" - ); - } - std::copy(a.begin(),a.end(),begin()); - } - - /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data - * - * @code tensor_core A{ array }; @endcode - * - * @param a container of \c array_type that is copied according to the storage layout - */ - template> - > - inline - tensor_core (const array_type &a) - : tensor_core() - { - if( 
size() != a.size() ){ - throw std::runtime_error("boost::numeric::ublas::tensor_core(extents_type,array_type): " - "array size mismatch with extents" - ); - } - std::copy(a.begin(),a.end(),begin()); - } - - - /** @brief Constructs a tensor_core with another tensor_core with a different layout - * - * @param other tensor_core with a different layout to be copied. - */ - template - tensor_core (const tensor_core &other) - : tensor_core( other.extents(), resizable_tag{} ) - { - copy(this->rank(), this->extents().data(), - this->data(), this->strides().data(), - other.data(), other.strides().data()); - - } - - - /** @brief Constructs a tensor_core with an tensor_core expression - * - * @code tensor_core A = B + 3 * C; @endcode - * - * @note type must be specified of tensor_core must be specified. - * @note dimension extents are extracted from tensors within the expression. - * - * @param expr tensor_core expression - * @param size tensor_core expression - */ - template - tensor_core (const detail::tensor_expression &expr) - : tensor_core( detail::retrieve_extents(expr), resizable_tag{} ) - { - static_assert(is_valid_tensor_v, - "boost::numeric::ublas::tensor_core(tensor_expression const&) : " - "other_tensor should be a valid tensor type" - ); - - static_assert(std::is_same_v, - "boost::numeric::ublas::tensor_core(tensor_expression const&) : " - "LHS and RHS should have the same value type" - ); - - detail::eval( *this, expr ); - } - - constexpr tensor_core( matrix_type const& v ) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{v.size1(), v.size2()}); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(const matrix &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const matrix &v)" - 
" : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - - if( extents_[0] != v.size1() || extents_[1] != v.size2() ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const matrix &v)" - " : please set the extents properly, the extents should contain the row and col of the matrix" - ); - } - - std::copy(v.data().begin(), v.data().end(), data_.begin()); - } - - constexpr tensor_core( matrix_type && v ) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{v.size1(), v.size2()}); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(matrix &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(matrix &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - if( extents_[0] != v.size1() || extents_[1] != v.size2() ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(matrix &&v)" - " : please set the extents properly, the extents should contain the row and col of the matrix" - ); - } - - std::move(v.data().begin(), v.data().end(),data_.begin()); - } - - constexpr tensor_core (const vector_type &v) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{ v.size(), typename extents_type::value_type{1} }); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(const vector_type &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const vector_type &v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - 
} - - if( extents_[0] != v.size() || extents_[1] != 1ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(const vector_type &v)" - " : please set the extents properly, the first extent should be the size of the vector and 1 for the second extent" - ); - } - - std::copy(v.data().begin(), v.data().end(), data_.begin()); - - } - - constexpr tensor_core (vector_type &&v) - : tensor_core() - { - if constexpr( is_dynamic_v< extents_type > ){ - auto temp = tensor_core(extents_type{ v.size(), typename extents_type::value_type{1} }); - swap(*this,temp); - } - - if constexpr( is_static_rank_v ){ - static_assert( extents_type::_size == 2ul, - "boost::numeric::ublas::tensor_core(vector_type &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - }else{ - if( extents_.size() != 2ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(vector_type &&v)" - " : the rank of extents is not correct, it should be of the rank 2" - ); - } - } - - if( extents_[0] != v.size() || extents_[1] != 1ul ){ - throw std::runtime_error( - "boost::numeric::ublas::tensor_core(vector_type &&v)" - " : please set the extents properly, the first extent should be the size of the vector and 1 for the second extent" - ); - } - - std::move(v.data().begin(), v.data().end(),data_.begin()); - - } - - /** @brief Constructs a tensor_core with a matrix expression - * - * @code tensor_core A = B + 3 * C; @endcode - * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. - * @note extents are automatically extracted from the temporary matrix - * - * @param expr matrix expression - */ - template - tensor_core (const matrix_expression_type &expr) - : tensor_core( matrix_type ( expr ) ) - { - } - - /** @brief Constructs a tensor_core with a vector expression - * - * @code tensor_core A = b + 3 * b; @endcode - * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. 
- * @note extents are automatically extracted from the temporary matrix - * - * @param expr vector expression - */ - template - tensor_core (const vector_expression_type &expr) - : tensor_core( vector_type ( expr ) ) - { - } - - - /** @brief Constructs a tensor_core from another tensor_core - * - * @param v tensor_core to be copied. - */ - inline - tensor_core (const tensor_core &v) - : tensor_expression_type() - , extents_ (v.extents_) - , strides_ (v.strides_) - , data_ (v.data_ ) - {} - - - - /** @brief Constructs a tensor_core from another tensor_core - * - * @param v tensor_core to be moved. - */ - inline - tensor_core (tensor_core &&v) noexcept - : tensor_expression_type() //tensor_container () - , extents_ (std::move(v.extents_)) - , strides_ (std::move(v.strides_)) - , data_ (std::move(v.data_ )) - {} - - - /** @brief Move assignsment operator - * - * @param v tensor_core to be moved. - */ - inline - tensor_core& operator=(tensor_core &&v) noexcept - { - swap(*this,v); - return *this; - } - - /// @brief Default destructor - ~tensor_core() = default; - - /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core - * - * @code A = B + C * 2; @endcode - * - * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. - * - * @param expr expression that is evaluated. 
- */ - template - tensor_core &operator = (const tensor_expression_type &expr) - { - detail::eval(*this, expr); - return *this; - } - - tensor_core& operator=(tensor_core const& other) - { - tensor_core temp(other); - swap (*this, temp); - return *this; - } - - constexpr tensor_core& operator=(const_reference v) - { - std::fill_n(this->begin(), this->size(), v); - return *this; - } - - /** @brief Returns true if the tensor_core is empty (\c size==0) */ - [[nodiscard]] inline - constexpr bool empty () const noexcept{ - return this->data_.empty(); - } - - /** @brief Returns the upper bound or max size of the tensor_core */ - [[nodiscard]] inline - constexpr size_type size() const noexcept{ - return this->data_.size(); - } - - /** @brief Returns the size of the tensor_core */ - [[nodiscard]] inline - constexpr size_type size (size_type r) const { - return this->extents_.at(r); - } - - /** @brief Returns the number of dimensions/modes of the tensor_core */ - [[nodiscard]] inline - constexpr size_type rank () const noexcept{ - return this->extents_.size(); - } - - /** @brief Returns the number of dimensions/modes of the tensor_core */ - [[nodiscard]] inline - constexpr size_type order () const noexcept{ - return this->extents_.size(); - } - - /** @brief Returns the strides of the tensor_core */ - [[nodiscard]] inline - constexpr strides_type const& strides () const noexcept{ - return this->strides_; - } - - /** @brief Returns the extents of the tensor_core */ - [[nodiscard]] inline - constexpr extents_type const& extents () const noexcept{ - return this->extents_; - } - - /** @brief Returns the strides of the tensor_core */ - [[nodiscard]] inline - constexpr strides_type& strides () noexcept{ - return this->strides_; - } - - /** @brief Returns the extents of the tensor_core */ - [[nodiscard]] inline - constexpr extents_type& extents () noexcept{ - return this->extents_; - } - - /** @brief Returns a \c const reference to the container. 
*/ - [[nodiscard]] inline - constexpr const_pointer data () const noexcept{ - return this->data_.data(); - } - - /** @brief Returns a \c const reference to the container. */ - [[nodiscard]] inline - constexpr pointer data () noexcept{ - return this->data_.data(); - } - - /** @brief Returns a \c const reference to the underlying container. */ - [[nodiscard]] inline - constexpr array_type const& base () const noexcept{ - return data_; - } - - /** @brief Returns a reference to the underlying container. */ - [[nodiscard]] inline - constexpr array_type& base () noexcept{ - return data_; - } - - /** @brief Element access using a single index. - * - * @code auto a = A[i]; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - [[nodiscard]] inline - constexpr const_reference operator [] (size_type i) const { - return this->data_[i]; - } - - /** @brief Element access using a single index. - * - * @code auto a = A[i]; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - [[nodiscard]] inline - constexpr reference operator [] (size_type i) { - return this->data_[i]; - } - - /** @brief Element access using a multi-index or single-index with bound checking - * and it throws the exception. - * - * @code auto a = A.at(i,j,k); @endcode or - * @code auto a = A.at(i); @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr const_reference at (size_type i, Indices ... is) const { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_.at(i); - }else{ - if( sizeof...(is) + 1 > strides_.size() ){ - throw std::runtime_error("Error in boost::numeric::ublas::at(size_type, Indices...): " - "number of variadic argument exceeds the strides size." - ); - } - static_assert( - std::conjunction_v< std::is_convertible... 
>, - "boost::numeric::ublas::tensor_core::at(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_.at(idx); - } - } - - /** @brief Element access using a multi-index or single-index with bound checking - * and it throws the exception. - * - * - * @code A.at(i,j,k) = a; @endcode or - * @code A.at(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr reference at (size_type i, Indices ... is) { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_.at(i); - }else{ - if( sizeof...(is) + 1 > strides_.size() ){ - throw std::runtime_error("Error in boost::numeric::ublas::at(size_type, Indices...): " - "number of variadic argument exceeds the strides size." - ); - } - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::at(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_.at(idx); - } - } - - /** @brief Element access using a multi-index or single-index with no bound checking - * and it does not throw. 
- * - * - * @code auto a = A(i,j,k); @endcode or - * @code auto a = A(i); @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr const_reference operator() (size_type i, Indices ... is) const { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_[i]; - }else{ - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::operator()(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... - ); - return this->data_[idx]; - } - } - - /** @brief Element access using a multi-index or single-index with no bound checking - * and it does not throw. - * - * - * @code A(i,j,k) = a; @endcode or - * @code A(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr reference operator() (size_type i, Indices ... is) { - if constexpr( sizeof...(is) == 0ul ){ - return this->data_[i]; - }else{ - static_assert( - std::conjunction_v< std::is_convertible... >, - "boost::numeric::ublas::tensor_core::operator()(size_type,Indices...) : " - "provided variadic argument is not convertible to tensor size_type" - ); - using strides_value_type = typename strides_type::value_type; - auto const idx = detail::access(this->strides_, - static_cast(i), - static_cast(is)... 
- ); - return this->data_[idx]; - } - } - - /** @brief Generates a tensor_core index for tensor_core contraction - * - * - * @code auto Ai = A(_i,_j,k); @endcode - * - * @param i placeholder - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - [[nodiscard]] inline - constexpr decltype(auto) operator() (index::index_type p, index_types ... ps) const - { - constexpr auto N = sizeof...(ps)+1; - if( N != this->rank() ) - throw std::runtime_error("Error in boost::numeric::ublas::operator(index::index_type,index_types&&): " - "size of provided index_types does not match with the rank." - ); - - return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); - } - - - /** @brief Reshapes the basic_tensor - * - * - * (1) @code A.reshape(extents{m,n,o}); @endcode or - * (2) @code A.reshape(extents{m,n,o},4); @endcode - * - * If the size of this smaller than the specified extents than - * default constructed (1) or specified (2) value is appended. - * - * @note rank of the basic_tensor might also change. - * - * @param e extents with which the basic_tensor is reshaped. - * @param v value which is appended if the basic_tensor is enlarged. 
- */ - inline - void reshape (extents_type const& e, value_type v = value_type{}) - { - static_assert(is_dynamic_v && is_dynamic_v, - "Error in boost::numeric::ublas::basic_tensor::reshape(extents_type const&,value_type) : " - "static extents or static strides cannot used inside reshape function" - ); - - this->extents_ = e; - this->strides_ = strides_type(this->extents_); - - auto p = product(extents_); - if constexpr( !std::is_same_v< resizable_tag, storage_resizable_container_tag > ){ - if( p != this->size() ){ - throw std::runtime_error( - "boost::numeric::ublas::basic_tensor::reshape(extents_type const&,value_type) : " - "cannot resize the non-resizable container, change the extents such a way that the product does not change" - ); - } - }else{ - if(p != this->size()) - this->data_.resize (p, v); - } - } - - friend void swap(tensor_core& lhs, tensor_core& rhs){ - std::swap(lhs.data_ , rhs.data_ ); - std::swap(lhs.extents_, rhs.extents_); - std::swap(lhs.strides_, rhs.strides_); - } - - - /// \brief return an iterator on the first element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator begin () const noexcept{ - return data_.begin (); - } - - /// \brief return an iterator on the first element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator cbegin () const noexcept{ - return data_.cbegin (); - } - - /// \brief return an iterator after the last element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator end () const noexcept{ - return data_.end(); - } - - /// \brief return an iterator after the last element of the tensor_core - [[nodiscard]] inline - constexpr const_iterator cend () const noexcept{ - return data_.cend (); - } - - /// \brief Return an iterator on the first element of the tensor_core - [[nodiscard]] inline - constexpr iterator begin () noexcept{ - return data_.begin(); - } - - /// \brief Return an iterator at the end of the tensor_core - [[nodiscard]] inline - constexpr iterator end () 
noexcept{ - return data_.end(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor_core (i.e. end() of normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator rbegin () const noexcept{ - return data_.rbegin(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor_core (i.e. end() of normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator crbegin () const noexcept{ - return data_.crbegin(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor_core (i.e. first element of the normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator rend () const noexcept{ - return data_.rend(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor_core (i.e. first element of the normal tensor_core) - [[nodiscard]] inline - constexpr const_reverse_iterator crend () const noexcept{ - return data_.crend(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor_core (i.e. end() of normal tensor_core) - [[nodiscard]] inline - constexpr reverse_iterator rbegin () noexcept{ - return data_.rbegin(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor_core (i.e. 
first element of the normal tensor_core) - [[nodiscard]] inline - constexpr reverse_iterator rend () noexcept{ - return data_.rend(); - } - -private: - - extents_type extents_; - strides_type strides_; - array_type data_; -}; - -} // namespaces - -#endif diff --git a/include/boost/numeric/ublas/tensor/tensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor_engine.hpp deleted file mode 100644 index 8f9293e05..000000000 --- a/include/boost/numeric/ublas/tensor/tensor_engine.hpp +++ /dev/null @@ -1,50 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_ENGINE_IMPL_HPP -#define BOOST_UBLAS_TENSOR_ENGINE_IMPL_HPP - -#include - -namespace boost::numeric::ublas{ - - template - struct tensor_engine; - - template - struct tensor_engine{ - using extents_type = ExtentsType; - - static_assert(is_extents_v, - "boost::numeric::ublas::tensor_engine : please provide valid tensor extents type" - ); - - using layout_type = LayoutType; - using strides_type = typename StrideType::template type; - - static_assert(is_strides_v, - "boost::numeric::ublas::tensor_engine : please provide valid tensor layout type" - ); - - using storage_traits_type = storage_traits; - - }; - - template - struct tensor_engine - : tensor_engine< ExtentsType, LayoutType, strides, StorageType > - {}; - -} // namespace boost::numeric::ublas - - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp b/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp index 7d75800ba..ba6510194 100644 --- a/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp +++ 
b/include/boost/numeric/ublas/tensor/traits/basic_type_traits.hpp @@ -1,64 +1,31 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// #ifndef BOOST_UBLAS_TENSOR_BASIC_TYPE_TRAITS_HPP #define BOOST_UBLAS_TENSOR_BASIC_TYPE_TRAITS_HPP #include #include +#include +#include namespace boost::numeric::ublas { - -/** @brief Checks if the extents or strides is dynamic - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template struct is_dynamic : std::false_type {}; - -template -inline static constexpr bool const is_dynamic_v = is_dynamic::value; - -/** @brief Checks if the extents or strides is static - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template struct is_static : std::false_type {}; - -template -inline static constexpr bool const is_static_v = is_static::value; -/** @brief Checks if the extents or strides has dynamic rank - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template -struct is_dynamic_rank : std::false_type {}; -template -inline static constexpr bool const is_dynamic_rank_v = is_dynamic_rank::value; +template +struct is_complex : std::false_type{}; -/** @brief Checks if the extents or strides has static rank - * - * @tparam E of type basic_extents or basic_static_extents - * - */ -template -struct is_static_rank : std::false_type {}; +template +struct is_complex< std::complex > : std::true_type{}; -template -inline static constexpr bool const is_static_rank_v = is_static_rank::value; +template +inline static 
constexpr bool is_complex_v = is_complex::value; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp b/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp index 4e5619966..e7bef80ae 100644 --- a/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp +++ b/include/boost/numeric/ublas/tensor/traits/storage_traits.hpp @@ -1,52 +1,51 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com + // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany +// The authors gratefully acknowledge the support of Google // #ifndef BOOST_UBLAS_TRAITS_STORAGE_HPP #define BOOST_UBLAS_TRAITS_STORAGE_HPP -#include #include -#include +#include -namespace boost { -namespace numeric { -namespace ublas { +#include "../tags.hpp" +namespace boost::numeric::ublas +{ -template -struct storage_traits; +template +struct container_traits; template -struct storage_traits> +struct container_traits> { - using array_type = std::vector; + using container_type = std::vector; - using size_type = typename array_type::size_type; - using difference_type = typename array_type::difference_type; - using value_type = typename array_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using value_type = typename container_type::value_type; - using reference = typename array_type::reference; - using const_reference = typename array_type::const_reference; + using reference = typename container_type::reference; + using const_reference = typename 
container_type::const_reference; - using pointer = typename array_type::pointer; - using const_pointer = typename array_type::const_pointer; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; - using iterator = typename array_type::iterator; - using const_iterator = typename array_type::const_iterator; + using iterator = typename container_type::iterator; + using const_iterator = typename container_type::const_iterator; - using reverse_iterator = typename array_type::reverse_iterator; - using const_reverse_iterator = typename array_type::const_reverse_iterator; + using reverse_iterator = typename container_type::reverse_iterator; + using const_reverse_iterator = typename container_type::const_reverse_iterator; using container_tag = storage_seq_container_tag; using resizable_tag = storage_resizable_container_tag; @@ -57,25 +56,25 @@ struct storage_traits> template -struct storage_traits> +struct container_traits> { - using array_type = std::array; + using container_type = std::array; - using size_type = typename array_type::size_type; - using difference_type = typename array_type::difference_type; - using value_type = typename array_type::value_type; + using size_type = typename container_type::size_type; + using difference_type = typename container_type::difference_type; + using value_type = typename container_type::value_type; - using reference = typename array_type::reference; - using const_reference = typename array_type::const_reference; + using reference = typename container_type::reference; + using const_reference = typename container_type::const_reference; - using pointer = typename array_type::pointer; - using const_pointer = typename array_type::const_pointer; + using pointer = typename container_type::pointer; + using const_pointer = typename container_type::const_pointer; - using iterator = typename array_type::iterator; - using const_iterator = typename array_type::const_iterator; + using 
iterator = typename container_type::iterator; + using const_iterator = typename container_type::const_iterator; - using reverse_iterator = typename array_type::reverse_iterator; - using const_reverse_iterator = typename array_type::const_reverse_iterator; + using reverse_iterator = typename container_type::reverse_iterator; + using const_reverse_iterator = typename container_type::const_reverse_iterator; using container_tag = storage_seq_container_tag; using resizable_tag = storage_static_container_tag; @@ -87,38 +86,40 @@ struct storage_traits> using rebind_size = std::array; }; -} // ublas -} // numeric -} // boost +} // namespace boost::numeric::ublas namespace boost::numeric::ublas { + +template +class basic_static_extents; + namespace detail{ template struct rebind_storage_size_helper{ using type = A; }; - template - struct rebind_storage_size_helper, A, storage_static_container_tag>{ - using type = typename storage_traits::template rebind_size< E0 * (Es * ...) >; + template + struct rebind_storage_size_helper, C, storage_static_container_tag>{ + using type = typename container_traits::template rebind_size< E0 * (Es * ...) 
>; }; - template - struct rebind_storage_size_helper, A, storage_static_container_tag>{ - using type = typename storage_traits::template rebind_size< 0 >; + template + struct rebind_storage_size_helper, C, storage_static_container_tag>{ + using type = typename container_traits::template rebind_size< 0 >; }; - } + } //namespace detail - template + template struct rebind_storage_size - : detail::rebind_storage_size_helper::resizable_tag + : detail::rebind_storage_size_helper::resizable_tag > {}; - template - using rebind_storage_size_t = typename rebind_storage_size::type; + template + using rebind_storage_size_t = typename rebind_storage_size::type; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp deleted file mode 100644 index 54a11a471..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_extents.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_EXTENTS_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_extents; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_extents< basic_extents > : std::true_type {}; - - template - struct is_dynamic< basic_extents > : std::true_type {}; - - template - struct is_dynamic_rank< basic_extents > : std::true_type {}; - - - namespace detail{ - - template <> struct dynamic_extents_impl<> { - using type = basic_extents; - }; - - } // namespace detail - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp deleted file mode 100644 index 0eaf8ff9e..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_dynamic_strides.hpp +++ /dev/null @@ -1,47 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_DYNAMIC_STRIDES_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_extents; - -template -class basic_strides; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_strides> : std::true_type {}; - - template - struct is_dynamic< basic_strides > : std::true_type {}; - - template - struct is_dynamic_rank< basic_strides > : std::true_type {}; - - template - struct strides> - { - template - using type = basic_strides; - }; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp deleted file mode 100644 index 14ed34870..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_extents.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_EXTENTS_HPP - -namespace boost::numeric::ublas { - -/// @brief checks if the type is tensor extents or not -template -struct is_extents : std::false_type {}; - -template -inline static constexpr bool const is_extents_v = is_extents::value; - -namespace detail{ - - template - struct dynamic_extents_impl; - -} // detail - -template -using extents = typename detail::dynamic_extents_impl::type; - -} // namespace boost::numeric::ublas - -#include -#include -#include - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp deleted file mode 100644 index f1cbfba38..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_extents.hpp +++ /dev/null @@ -1,45 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_EXTENTS_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_fixed_rank_extents; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_extents< basic_fixed_rank_extents > : std::true_type {}; - - template - struct is_dynamic< basic_fixed_rank_extents > : std::true_type {}; - - template - struct is_static_rank< basic_fixed_rank_extents > : std::true_type {}; - - namespace detail{ - - template struct dynamic_extents_impl { - using type = basic_fixed_rank_extents; - }; - - } // namespace detail - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp deleted file mode 100644 index 2e378a269..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_fixed_rank_strides.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_FIXED_RANK_STRIDES_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_fixed_rank_extents; - -template class basic_fixed_rank_strides; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_strides< basic_fixed_rank_strides< T, R, L> > : std::true_type {}; - - template - struct is_dynamic< basic_fixed_rank_strides > : std::true_type {}; - - template - struct is_static_rank< basic_fixed_rank_strides > : std::true_type {}; - - template - struct strides> - { - template - using type = basic_fixed_rank_strides; - }; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp deleted file mode 100644 index 19662f0ae..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_static_extents.hpp +++ /dev/null @@ -1,37 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_EXTENTS_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_EXTENTS_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_static_extents; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - -template -struct is_extents< basic_static_extents > : std::true_type {}; - -template -struct is_static< basic_static_extents > : std::true_type {}; - -template -struct is_static_rank< basic_static_extents > : std::true_type {}; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp deleted file mode 100644 index 47137287b..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_static_strides.hpp +++ /dev/null @@ -1,46 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_STTAIC_STRIDES_HPP - -#include - -namespace boost::numeric::ublas{ - -template class basic_static_extents; - -template class basic_static_strides; - -} // namespace boost::numeric::ublas - -namespace boost::numeric::ublas{ - - template - struct is_strides< basic_static_strides< basic_static_extents, L > > : std::true_type {}; - - template - struct is_static< basic_static_strides< basic_static_extents, L > > : std::true_type {}; - - template - struct is_static_rank< basic_static_strides< basic_static_extents, L > > : std::true_type {}; - - template - struct strides> - { - template - using type = basic_static_strides, Layout>; - }; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp deleted file mode 100644 index f31fb67a9..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_strides.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_STRIDES_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_STRIDES_HPP - -namespace boost::numeric::ublas { - - /// @brief checks if the type is tensor strides or not - template - struct is_strides : std::false_type {}; - - template - inline static constexpr bool const is_strides_v = is_strides::value; - - template - struct strides; - - /** @brief type alias of result of strides::type - * - * @tparam E extents type either basic_extents or basic_static_extents - * - * @tparam Layout either first_order or last_order - * - */ - template - using strides_t = typename strides::template type; - -} // namespace boost::numeric::ublas - -#include -#include -#include - -#endif diff --git a/include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp b/include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp deleted file mode 100644 index b2bb161cc..000000000 --- a/include/boost/numeric/ublas/tensor/traits/type_traits_tensor.hpp +++ /dev/null @@ -1,41 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_TENSOR_HPP -#define BOOST_UBLAS_TENSOR_TYPE_TRAITS_TENSOR_HPP - -#include -#include -#include - -namespace boost::numeric::ublas{ - - template class tensor_core; - -} // namespace boost::numeric::ublas - - -namespace boost::numeric::ublas { - - /// @brief Checks if the type is valid tensor - template - struct is_valid_tensor: std::false_type{}; - - template - struct is_valid_tensor< tensor_core >: std::true_type{}; - - template - inline static constexpr bool is_valid_tensor_v = is_valid_tensor::value; - -} // namespace boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/type_traits.hpp b/include/boost/numeric/ublas/tensor/type_traits.hpp index cf1b865c7..bd12bd4b6 100644 --- a/include/boost/numeric/ublas/tensor/type_traits.hpp +++ b/include/boost/numeric/ublas/tensor/type_traits.hpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -13,10 +13,7 @@ #ifndef BOOST_UBLAS_TENSOR_TYPE_TRAITS_HPP #define BOOST_UBLAS_TENSOR_TYPE_TRAITS_HPP -#include -#include - -#include -#include +#include "traits/basic_type_traits.hpp" +#include "traits/storage_traits.hpp" #endif diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 839d3a323..723f5b11a 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,35 +32,43 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run test_tensor.cpp - test_strides.cpp + [ run test_algorithms.cpp + test_einstein_notation.cpp test_expression.cpp - test_operators_comparison.cpp - test_operators_arithmetic.cpp - test_multiplication.cpp - test_multi_index_utility.cpp - test_multi_index.cpp - test_extents.cpp test_expression_evaluation.cpp - test_einstein_notation.cpp - test_algorithms.cpp - test_tensor_matrix_vector.cpp + test_extents_dynamic.cpp + test_extents_dynamic_rank_static.cpp + test_extents_functions.cpp + test_fixed_rank_expression_evaluation.cpp + test_fixed_rank_extents.cpp + test_fixed_rank_functions.cpp + test_fixed_rank_operators_arithmetic.cpp + test_fixed_rank_operators_comparison.cpp + test_fixed_rank_strides.cpp + test_fixed_rank_tensor.cpp + test_fixed_rank_tensor_matrix_vector.cpp test_functions.cpp - test_static_tensor.cpp + test_multi_index.cpp + test_multi_index_utility.cpp + test_multiplication.cpp + test_operators_arithmetic.cpp + test_operators_comparison.cpp + test_static_expression_evaluation.cpp test_static_extents.cpp - test_static_strides.cpp test_static_operators_arithmetic.cpp test_static_operators_comparison.cpp - test_static_expression_evaluation.cpp + test_static_strides.cpp + test_static_tensor.cpp test_static_tensor_matrix_vector.cpp - test_fixed_rank_tensor.cpp - test_fixed_rank_extents.cpp - test_fixed_rank_strides.cpp - test_fixed_rank_operators_arithmetic.cpp - test_fixed_rank_operators_comparison.cpp - test_fixed_rank_expression_evaluation.cpp 
- test_fixed_rank_tensor_matrix_vector.cpp - test_fixed_rank_functions.cpp - unit_test_framework + test_strides.cpp + test_tensor.cpp + test_tensor_matrix_vector.cpp + unit_test_framework + : + : + : + : test_tensor + : + # ] ; diff --git a/test/tensor/test_algorithms.cpp b/test/tensor/test_algorithms.cpp index 5a11a9c3a..477ee1e0c 100644 --- a/test/tensor/test_algorithms.cpp +++ b/test/tensor/test_algorithms.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -15,42 +15,37 @@ #include #include #include -#include -#include -#include -#include +#include #include "utility.hpp" #include -BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms, - * boost::unit_test::depends_on("test_extents") - * boost::unit_test::depends_on("test_strides")) +BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms/*, + * boost::unit_test::depends_on("test_shape_dynamic") * boost::unit_test::depends_on("test_strides")*/ + ) // BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms) using test_types = zip>::with_t; -using test_types2 = std::tuple>; +using test_types2 = std::tuple>; struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents { - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{4,1,3}, // 6 - extents_type{1,2,3}, // 7 - extents_type{4,2,3}, // 8 - extents_type{4,2,3,5} } // 9 - { - } - std::vector extents; + using extents_t = boost::numeric::ublas::extents<>; + const std::vector extents = + { + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + extents_t{2,3}, // 4 + extents_t{2,3,1}, // 5 + extents_t{4,1,3}, // 6 + 
extents_t{1,2,3}, // 7 + extents_t{4,2,3}, // 8 + extents_t{4,2,3,5} + }; }; @@ -58,401 +53,364 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_copy, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; - + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; - for(auto const& n : extents) { - auto a = vector_type(product(n)); - auto b = vector_type(product(n)); - auto c = vector_type(product(n)); + constexpr auto first_order = ublas::layout::first_order{}; + constexpr auto last_order = ublas::layout::last_order {}; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wb = ublas::strides_t,ublas::layout::last_order> (n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - auto v = value_type{}; - for(auto i = 0ul; i < a.size(); ++i, v+=1){ - a[i]=v; - } + for(auto const& n : extents) { - ublas::copy( n.size(), n.data(), b.data(), wb.data(), a.data(), wa.data() ); - ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data() ); + auto a = vector_t(product(n)); + auto b = vector_t(product(n)); + auto c = vector_t(product(n)); - for(auto i = 1ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i], a[i] ); + auto wa = ublas::to_strides(n,first_order); + auto wb = ublas::to_strides(n,last_order ); + auto wc = ublas::to_strides(n,first_order); - using size_type = typename ublas::strides_t,ublas::layout::first_order>::value_type; - size_type const*const p0 = nullptr; - BOOST_CHECK_THROW( ublas::copy( n.size(), p0, c.data(), wc.data(), b.data(), wb.data() ), std::runtime_error ); - BOOST_CHECK_THROW( ublas::copy( n.size(), n.data(), c.data(), p0, b.data(), wb.data() ), std::runtime_error ); - BOOST_CHECK_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), p0 ), std::runtime_error ); - - value_type* c0 = nullptr; - BOOST_CHECK_THROW( ublas::copy( n.size(), n.data(), c0, 
wc.data(), b.data(), wb.data() ), std::runtime_error ); + auto v = value_type{}; + for(auto i = 0ul; i < a.size(); ++i, v+=1){ + a[i]=v; } - // special case rank == 0 - { - auto n = ublas::extents<>{}; + ublas::copy( ublas::size(n), n.data(), b.data(), wb.data(), a.data(), wa.data() ); + ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), b.data(), wb.data() ); - auto a = vector_type(product(n)); - auto b = vector_type(product(n)); - auto c = vector_type(product(n)); + for(auto i = 1ul; i < c.size(); ++i) + BOOST_CHECK_EQUAL( c[i], a[i] ); + std::size_t const*const p0 = nullptr; + value_type* c0 = nullptr; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wb = ublas::strides_t,ublas::layout::last_order> (n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), p0, c.data(), wc.data(), b.data(), wb.data() ), std::runtime_error ); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), p0, b.data(), wb.data() ), std::runtime_error ); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), b.data(), p0 ), std::runtime_error ); + BOOST_CHECK_THROW( ublas::copy( ublas::size(n), n.data(), c0, wc.data(), b.data(), wb.data() ), std::runtime_error ); + } +} - ublas::copy( n.size(), n.data(), b.data(), wb.data(), a.data(), wa.data() ); - ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data() ); +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_copy_exceptions, value, test_types2, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; - BOOST_CHECK_NO_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data() ) ); + for(auto const& n : extents) { - } + value_type* a = nullptr; + auto c = vector_t(ublas::product(n)); + auto wa = ublas::to_strides(n,first_order); + auto wc = 
ublas::to_strides(n,first_order); + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), a, wa.data() ), std::runtime_error ); + } + for(auto const& n : extents) { -} + value_type* a = nullptr; + value_type* c = nullptr; + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_copy_exceptions, value, test_types2, fixture ) -{ - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c, wc.data(), a, wa.data() ), std::runtime_error ); - for(auto const& n : extents) { + } - value_type* a = nullptr; - auto c = vector_type(product(n)); + for(auto const& n : extents) { - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + auto a = vector_t(product(n)); + value_type* c = nullptr; - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), a, wa.data() ), std::runtime_error ); - - } + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - for(auto const& n : extents) { + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c, wc.data(), a.data(), wa.data() ), std::runtime_error ); - value_type* a = nullptr; - value_type* c = nullptr; + } - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + for(auto const& n : extents) { - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c, wc.data(), a, wa.data() ), std::runtime_error ); + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - } - for(auto const& n : extents) { + size_t* wa = nullptr; + auto wc = ublas::to_strides(n,first_order); - auto a = vector_type(product(n)); - value_type* c = nullptr; + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc.data(), a.data(), wa ), 
std::runtime_error ); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + } - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c, wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } + for(auto const& n : extents) { - for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); - size_t* wa = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c.data(), wc.data(), a.data(), wa ), std::runtime_error ); - - } + size_t* wc = nullptr; + auto wa = ublas::to_strides(n,first_order); - for(auto const& n : extents) { + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), n.data(), c.data(), wc, a.data(), wa.data() ), std::runtime_error ); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); + } - size_t* wc = nullptr; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + for(auto const& n : extents) { - BOOST_REQUIRE_THROW( ublas::copy( n.size(), n.data(), c.data(), wc, a.data(), wa.data() ), std::runtime_error ); - - } + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - for(auto const& n : extents) { + size_t* m = nullptr; + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); - - size_t* m = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_REQUIRE_THROW( ublas::copy( ublas::size(n), m, c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - BOOST_REQUIRE_THROW( ublas::copy( n.size(), m, c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_transform, value, test_types2, fixture ) { 
- using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; + constexpr auto last_order = ublas::layout::last_order {}; - for(auto const& n : extents) { - auto a = vector_type(product(n)); - auto b = vector_type(product(n)); - auto c = vector_type(product(n)); + for(auto const& n : extents) { - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wb = ublas::strides_t,ublas::layout::last_order> (n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + auto a = vector_t(ublas::product(n)); + auto b = vector_t(ublas::product(n)); + auto c = vector_t(ublas::product(n)); - auto v = value_type{}; - for(auto i = 0ul; i < a.size(); ++i, v+=1){ - a[i]=v; - } + auto wa = ublas::to_strides(n,first_order); + auto wb = ublas::to_strides(n,last_order ); + auto wc = ublas::to_strides(n,first_order); - ublas::transform( n.size(), n.data(), b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ); - ublas::transform( n.size(), n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a - value_type(1);} ); + auto v = value_type{}; + for(auto i = 0ul; i < a.size(); ++i, v+=1){ + a[i]=v; + } - using size_type = typename ublas::strides_t,ublas::layout::first_order>::value_type; + ublas::transform( ublas::size(n), n.data(), b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ); + ublas::transform( ublas::size(n), n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a - value_type(1);} ); - size_type zero = 0; - ublas::transform(zero, n.data(), c.data(), wc.data(), b.data(), wb.data(), [](value_type const& a){ return a + value_type(1);} ); + auto zero = std::size_t{0}; + ublas::transform(zero, n.data(), c.data(), wc.data(), 
b.data(), wb.data(), [](value_type const& a){ return a + value_type(1);} ); - value_type* c0 = nullptr; - const size_type* s0 = nullptr; - size_type const*const p0 = nullptr; + value_type* c0 = nullptr; + const std::size_t* s0 = nullptr; + std::size_t const*const p0 = nullptr; - BOOST_CHECK_THROW(ublas::transform( n.size(), n.data(), c0, wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); - BOOST_CHECK_THROW(ublas::transform( n.size(), n.data(), b.data(), s0, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); - BOOST_CHECK_THROW(ublas::transform( n.size(), p0, b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); + BOOST_CHECK_THROW(ublas::transform( ublas::size(n), n.data(), c0, wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); + BOOST_CHECK_THROW(ublas::transform( ublas::size(n), n.data(), b.data(), s0, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); + BOOST_CHECK_THROW(ublas::transform( ublas::size(n), p0, b.data(), wb.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error); - for(auto i = 1ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i], a[i] ); + for(auto i = 1ul; i < c.size(); ++i) + BOOST_CHECK_EQUAL( c[i], a[i] ); - } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_transform_exceptions, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; - - for(auto const& n : extents) { - - value_type* a = nullptr; - auto c = vector_type(product(n)); - - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c.data(), wc.data(), a, 
wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - value_type* a = nullptr; - value_type* c = nullptr; - - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c, wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; - for(auto const& n : extents) { + constexpr auto first_order = ublas::layout::first_order{}; - auto a = vector_type(product(n)); - value_type* c = nullptr; + for(auto const& n : extents) { - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + value_type* a = nullptr; + auto c = vector_t(ublas::product(n)); - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c, wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } - - for(auto const& n : extents) { + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c.data(), wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - size_t* wa = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); + } - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c.data(), wc.data(), a.data(), wa, [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } + for(auto const& n : extents) { - for(auto const& n : extents) { + value_type* a = nullptr; + value_type* c = nullptr; - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); + auto wa = 
ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - size_t* wc = nullptr; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c, wc.data(), a, wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - BOOST_REQUIRE_THROW( ublas::transform( n.size(), n.data(), c.data(), wc, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } + } - for(auto const& n : extents) { + for(auto const& n : extents) { - auto a = vector_type(product(n)); - auto c = vector_type(product(n)); - - size_t* m = nullptr; - auto wc = ublas::strides_t,ublas::layout::first_order>(n); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + auto a = vector_t(ublas::product(n)); + value_type* c = nullptr; - BOOST_REQUIRE_THROW( ublas::transform( n.size(), m, c.data(), wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - - } -} + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate, value, test_types2, fixture ) -{ - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c, wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); + } - for(auto const& n : extents) { + for(auto const& n : extents) { - auto const s = product(n); + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - auto a = vector_type(product(n)); - // auto b = vector_type(product(n)); - // auto c = vector_type(product(n)); + size_t* wa = nullptr; + auto wc = ublas::to_strides(n,first_order); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - // auto wb = ublas::strides_t,ublas::layout::last_order> (n); 
- // auto wc = ublas::strides_t,ublas::layout::first_order>(n); + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c.data(), wc.data(), a.data(), wa, [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - auto v = value_type{}; - for(auto i = 0ul; i < a.size(); ++i, v+=value_type(1)){ - a[i]=v; - } + } - auto acc = ublas::accumulate( n.size(), n.data(), a.data(), wa.data(), v); + for(auto const& n : extents) { - BOOST_CHECK_EQUAL( acc, value_type( static_cast< inner_type_t >( s*(s+1) / 2 ) ) ); + auto a = vector_t(ublas::product(n)); + auto c = vector_t(ublas::product(n)); - using size_type = typename ublas::strides_t,ublas::layout::first_order>::value_type; - size_type zero = 0; - (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(),v); + size_t* wc = nullptr; + auto wa = ublas::to_strides(n,first_order); - value_type* c0 = nullptr; - size_type const*const p0 = nullptr; + BOOST_REQUIRE_THROW( ublas::transform( ublas::size(n), n.data(), c.data(), wc, a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), c0, wa.data(), v), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), a.data(), p0, v), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), p0, a.data(), wa.data(), v), std::runtime_error); + } + for(auto const& n : extents) { - auto acc2 = ublas::accumulate( n.size(), n.data(), a.data(), wa.data(), v, - [](auto const& l, auto const& r){return l + r; }); + auto a = vector_t(product(n)); + auto c = vector_t(product(n)); - BOOST_CHECK_EQUAL( acc2, value_type( static_cast< inner_type_t >( s*(s+1) / 2 ) ) ); + size_t* m = nullptr; + auto wa = ublas::to_strides(n,first_order); + auto wc = ublas::to_strides(n,first_order); - (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(), v, [](auto const& l, auto const& r){return l + r; }); + BOOST_REQUIRE_THROW( 
ublas::transform( ublas::size(n), m, c.data(), wc.data(), a.data(), wa.data(), [](value_type const& a){ return a + value_type(1);} ), std::runtime_error ); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), c0, wa.data(), v,[](auto const& l, auto const& r){return l + r; }), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), n.data(), a.data(), p0, v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); - BOOST_CHECK_THROW((void)ublas::accumulate( n.size(), p0, a.data(), wa.data(),v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); - - } + } } - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate_exceptions, value, test_types2, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate, value, test_types2, fixture ) { - using namespace boost::numeric; - using value_type = value; - using vector_type = std::vector; - - for(auto const& n : extents) { + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; - value_type* a = nullptr; + constexpr auto first_order = ublas::layout::first_order{}; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), n.data(), a, wa.data(), value_type{0} ), std::runtime_error ); - - } + for(auto const& n : extents) { - for(auto const& n : extents) { + auto const s = ublas::product(n); - value_type* a = nullptr; + auto a = vector_t(ublas::product(n)); + auto wa = ublas::to_strides(n,first_order); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), n.data(), a, wa.data(), value_type{0},[](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); - + auto v = value_type{}; + for(auto i = 0ul; i < a.size(); ++i, v+=value_type(1)){ + a[i]=v; } - for(auto const& n : extents) { + auto acc = ublas::accumulate( ublas::size(n), 
n.data(), a.data(), wa.data(), v); - auto a = vector_type(product(n)); + auto sum = std::div(s*(s+1),2).quot; - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - size_t p = 0u; - BOOST_CHECK_EQUAL ( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0} ), value_type{0} ); - - } - - for(auto const& n : extents) { + BOOST_CHECK_EQUAL( acc, value_type( static_cast< inner_type_t >( sum ) ) ); - auto a = vector_type(product(n)); + auto zero = std::size_t{0}; + (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(),v); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); - size_t p = 0u; - BOOST_CHECK_EQUAL( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), value_type{0} ); - - } + value_type* c0 = nullptr; + std::size_t const*const p0 = nullptr; - for(auto const& n : extents) { + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), c0, wa.data(), v), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), a.data(), p0, v), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), p0, a.data(), wa.data(), v), std::runtime_error); - auto a = vector_type(product(n)); - size_t* wa = nullptr; + auto acc2 = ublas::accumulate( ublas::size(n), n.data(), a.data(), wa.data(), v, + [](auto const& l, auto const& r){return l + r; }); - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), n.data(), a.data(), wa, value_type{0} ), std::runtime_error ); - - } + BOOST_CHECK_EQUAL( acc2, value_type( static_cast< inner_type_t >( sum ) ) ); - for(auto const& n : extents) { + (void)ublas::accumulate(zero, n.data(), a.data(), wa.data(), v, [](auto const& l, auto const& r){return l + r; }); - auto a = vector_type(product(n)); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), c0, wa.data(), v,[](auto const& l, auto const& r){return l + r; }), std::runtime_error); + 
BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), n.data(), a.data(), p0, v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); + BOOST_CHECK_THROW((void)ublas::accumulate( ublas::size(n), p0, a.data(), wa.data(),v, [](auto const& l, auto const& r){return l + r; }), std::runtime_error); - auto wa = ublas::strides_t,ublas::layout::first_order>(n); + } +} - size_t* m = nullptr; - BOOST_REQUIRE_THROW( (void)ublas::accumulate( n.size(), m, a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); - - } +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate_exceptions, value, test_types2, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = value; + using vector_t = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; + + + for(auto const& n : extents) { + value_type* a = nullptr; + auto wa = ublas::to_strides(n,first_order); + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), n.data(), a, wa.data(), value_type{0} ), std::runtime_error ); + + } + + for(auto const& n : extents) { + value_type* a = nullptr; + auto wa = ublas::to_strides(n,first_order); + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), n.data(), a, wa.data(), value_type{0},[](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto wa = ublas::to_strides(n,first_order); + size_t p = 0u; + BOOST_CHECK_EQUAL ( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0} ), value_type{0} ); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto wa = ublas::to_strides(n,first_order); + size_t p = 0u; + BOOST_CHECK_EQUAL( ublas::accumulate( p, n.data(), a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), value_type{0} ); + } + + for(auto const& n : extents) { 
+ auto a = vector_t(product(n)); + size_t* wa = nullptr; + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), n.data(), a.data(), wa, value_type{0} ), std::runtime_error ); + } + + for(auto const& n : extents) { + auto a = vector_t(product(n)); + auto wa = ublas::to_strides(n,first_order); + size_t* m = nullptr; + BOOST_REQUIRE_THROW( (void)ublas::accumulate( ublas::size(n), m, a.data(), wa.data(), value_type{0}, [](value_type const& a,value_type const& b){ return a + b;} ), std::runtime_error ); + } } @@ -460,282 +418,146 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_accumulate_exceptions, template void init(std::vector& a) { - auto v = V(1); - for(auto i = 0u; i < a.size(); ++i, ++v){ - a[i] = v; - } + auto v = V(1); + for(auto i = 0u; i < a.size(); ++i, ++v){ + a[i] = v; + } } template void init(std::vector>& a) { - auto v = std::complex(1,1); - for(auto i = 0u; i < a.size(); ++i){ - a[i] = v; - v.real(v.real()+1); - v.imag(v.imag()+1); - } + auto v = std::complex(1,1); + for(auto i = 0u; i < a.size(); ++i){ + a[i] = v; + v.real(v.real()+1); + v.imag(v.imag()+1); + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_trans, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename extents_type::value_type; - using permutation_type = std::vector; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; +// using layout_t = typename value::second_type; + using vector_t = std::vector; + using base_t = typename extents_t::base_type; + using permutation_type = std::vector; + constexpr auto first_order = ublas::layout::first_order{}; - for(auto const& n : extents) { - auto p = n.size(); - auto s = product(n); + for(auto const& n : extents) { - auto pi = 
permutation_type(p); - auto a = vector_type(s); - auto b1 = vector_type(s); - auto b2 = vector_type(s); - auto c1 = vector_type(s); - auto c2 = vector_type(s); + auto p = ublas::size(n); + auto s = ublas::product(n); - auto wa = strides_type(n); + auto pi = permutation_type(p); + auto a = vector_t(s); + auto b1 = vector_t(s); + auto b2 = vector_t(s); + auto c1 = vector_t(s); + auto c2 = vector_t(s); - init(a); + auto wa = ublas::to_strides(n,first_order); - // so wie last-order. - for(auto i = size_type(0), j = p; i < n.size(); ++i, --j) - pi[i] = j; + init(a); - auto nc = typename extents_type::base_type (p); - for(auto i = 0u; i < p; ++i) - nc[pi[i]-1] = n[i]; + // so wie last-order. + for(auto i = std::size_t{0}, j = p; i < ublas::size(n); ++i, --j) + pi[i] = j; - auto wc = strides_type(extents_type(nc)); - auto wc_pi = typename strides_type::base_type (p); - for(auto i = 0u; i < p; ++i) - wc_pi[pi[i]-1] = wc[i]; + auto nc_base = base_t(p); + for(auto i = 0u; i < p; ++i) + nc_base[pi[i]-1] = n[i]; - ublas::copy ( p, n.data(), c1.data(), wc_pi.data(), a.data(), wa.data()); - ublas::trans( p, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); + auto nc = extents_t(std::move(nc_base)); - if(!std::is_compound_v) - for(auto i = 0ul; i < s; ++i) - BOOST_CHECK_EQUAL( c1[i], c2[i] ); + auto wc = ublas::to_strides(nc,first_order); + auto wc_pi = base_t(p); + for(auto i = 0u; i < p; ++i) + wc_pi[pi[i]-1] = wc[i]; + ublas::copy ( p, n.data(), c1.data(), wc_pi.data(), a.data(), wa.data()); + ublas::trans( p, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); - auto nb = typename extents_type::base_type (p); - for(auto i = 0u; i < p; ++i) - nb[pi[i]-1] = nc[i]; + if(!std::is_compound_v) + for(auto i = 0ul; i < s; ++i) + BOOST_CHECK_EQUAL( c1[i], c2[i] ); - auto wb = strides_type (extents_type(nb)); - auto wb_pi = typename strides_type::base_type (p); - for(auto i = 0u; i < p; ++i) - wb_pi[pi[i]-1] = wb[i]; - ublas::copy ( p, nc.data(), 
b1.data(), wb_pi.data(), c1.data(), wc.data()); - ublas::trans( p, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); + auto nb_base = base_t(p); + for(auto i = 0u; i < p; ++i) + nb_base[pi[i]-1] = nc[i]; - if(!std::is_compound_v) - for(auto i = 0ul; i < s; ++i) - BOOST_CHECK_EQUAL( b1[i], b2[i] ); + auto nb = extents_t(std::move(nb_base)); - for(auto i = 0ul; i < s; ++i) - BOOST_CHECK_EQUAL( a[i], b2[i] ); + auto wb = ublas::to_strides(nb,first_order); + auto wb_pi = base_t(p); + for(auto i = 0u; i < p; ++i) + wb_pi[pi[i]-1] = wb[i]; - size_type zero = 0; - ublas::trans( zero, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); - ublas::trans( zero, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); + ublas::copy ( p, nc.data(), b1.data(), wb_pi.data(), c1.data(), wc.data()); + ublas::trans( p, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); - value_type *c0 = nullptr; - size_type const*const s0 = nullptr; + if(!std::is_compound_v) + for(auto i = 0ul; i < s; ++i) + BOOST_CHECK_EQUAL( b1[i], b2[i] ); - BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c0, wc.data(), a.data(), wa.data()), std::runtime_error); - BOOST_CHECK_THROW(ublas::trans( p, s0, pi.data(), c2.data(),wc.data(), a.data(), wa.data()), std::runtime_error); - BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c2.data(), s0, a.data(), wa.data()), std::runtime_error); - BOOST_CHECK_THROW(ublas::trans( p, n.data(), s0, c2.data(), wc.data(), a.data(), wa.data()), std::runtime_error); + for(auto i = 0ul; i < s; ++i) + BOOST_CHECK_EQUAL( a[i], b2[i] ); - } -} + auto zero = std::size_t{0}; + ublas::trans( zero, n.data(), pi.data(), c2.data(), wc.data(), a.data(), wa.data() ); + ublas::trans( zero, nc.data(), pi.data(), b2.data(), wb.data(), c2.data(), wc.data() ); + value_type *c0 = nullptr; + std::size_t const*const s0 = nullptr; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_trans_exceptions, value, test_types, 
fixture ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename extents_type::value_type; - using permutation_type = std::vector; - - for(auto const& n : extents) { + BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c0, wc.data(), a.data(), wa.data()), std::runtime_error); + BOOST_CHECK_THROW(ublas::trans( p, s0, pi.data(), c2.data(),wc.data(), a.data(), wa.data()), std::runtime_error); + BOOST_CHECK_THROW(ublas::trans( p, n.data(), pi.data(), c2.data(), s0, a.data(), wa.data()), std::runtime_error); + BOOST_CHECK_THROW(ublas::trans( p, n.data(), s0, c2.data(), wc.data(), a.data(), wa.data()), std::runtime_error); - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - value_type* a = nullptr; - auto c = vector_type(s); - - auto wa = strides_type(n); - - auto nc = typename extents_type::base_type (p); - auto wc = strides_type(n); - auto wc_pi = typename strides_type::base_type (p); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), a, wa.data(), c.data(), wc.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - value_type* a = nullptr; - auto c = vector_type(s); - - auto wa = strides_type(n); - auto nc = typename extents_type::base_type (p); - - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc.data(), a, wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - - auto pi = permutation_type(p); - value_type* a = nullptr; - value_type* c = nullptr; - - auto wa = strides_type(n); - auto nc = typename extents_type::base_type (p); - - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, 
nc.data(), pi.data(), c, wc.data(), a, wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - auto wa = strides_type(n); - - auto nc = typename extents_type::base_type (p); - - size_t* wc = nullptr; - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc, a.data(), wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - auto wc = strides_type(n); - auto nc = typename extents_type::base_type (p); - - size_t* wa = nullptr; - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc.data(), a.data(), wa ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - size_t* wc = nullptr; - - auto nc = typename extents_type::base_type (p); - - size_t* wa = nullptr; - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi.data(), c.data(), wc, a.data(), wa ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - size_type* pi = nullptr; - auto a = vector_type(s); - auto c = vector_type(s); - - auto wa = strides_type(n); - - auto nc = typename extents_type::base_type (p); - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc.data(), pi, c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - auto p = n.size(); - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); - - auto wa = strides_type(n); - size_t* nc = nullptr; - - auto wc = strides_type(n); - - BOOST_REQUIRE_THROW( ublas::trans( p, nc, 
pi.data(), c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); - - } - - for(auto const& n : extents) { - - size_type p = 1; - auto s = product(n); - - auto pi = permutation_type(p); - auto a = vector_type(s); - auto c = vector_type(s); + } +} - auto wa = strides_type(n); - auto nc = typename extents_type::base_type (p); - - auto wc = strides_type(n); - ublas::trans( p, nc.data(), pi.data(), c.data(), wc.data(), a.data(), wa.data() ); - - } +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_algorithms_trans_exceptions, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using permutation_type = std::vector; + + constexpr auto layout = layout_t{}; + + std::size_t* nnullptr = nullptr; + value_type * anullptr = nullptr; + + for(auto const& n : extents) { + auto p = ublas::size(n); + auto s = ublas::product(n); + auto pi = permutation_type(p); + auto a = vector_t(s); + auto c = vector_t(s); + auto wa = ublas::to_strides(n,layout); + auto wc = ublas::to_strides(n,layout); + if(p>1){ + BOOST_REQUIRE_THROW( ublas::trans( p, nnullptr, pi.data(), c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , nnullptr , c.data(), wc.data(), a.data(), wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), nnullptr , a.data(), nnullptr ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), wc.data(), a.data(), nnullptr ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), nnullptr , a.data(), wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), anullptr, wc.data(), anullptr, wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), c.data(), 
wc.data(), anullptr, wa.data() ), std::runtime_error ); + BOOST_REQUIRE_THROW( ublas::trans( p, n.data() , pi.data(), anullptr, wc.data(), a.data(), wa.data() ), std::runtime_error ); + } + + // ublas::trans( p, n.data(), pi.data(), c.data(), wc.data(), a.data(), wa.data() ); + } } diff --git a/test/tensor/test_einstein_notation.cpp b/test/tensor/test_einstein_notation.cpp index 0abda5b96..400011a7c 100644 --- a/test/tensor/test_einstein_notation.cpp +++ b/test/tensor/test_einstein_notation.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,15 +12,18 @@ // And we acknowledge the support from all contributors. -#include -#include #include - #include + +#include +#include + + #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_einstein_notation, * boost::unit_test::depends_on("test_multi_index") ) +BOOST_AUTO_TEST_SUITE ( test_einstein_notation/*, + *boost::unit_test::depends_on("test_multi_index") */) using test_types = zip>::with_t; @@ -29,94 +32,115 @@ using test_types = zip>::with_t; - using namespace boost::numeric::ublas::index; - - { - auto A = tensor_type{5,3}; - auto B = tensor_type{3,4}; - // auto C = tensor_type{4,5,6}; - - for(auto j = 0u; j < A.extents().at(1); ++j) - for(auto i = 0u; i < A.extents().at(0); ++i) - A.at( i,j ) = value_type( static_cast< inner_type_t >(i+1) ); - - for(auto j = 0u; j < B.extents().at(1); ++j) - for(auto i = 0u; i < B.extents().at(0); ++i) - B.at( i,j ) = value_type( static_cast< inner_type_t >(i+1) ); - - - - auto AB = A(_,_e) * B(_e,_); + namespace ublas = boost::numeric::ublas; + + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using 
tensor_t = ublas::tensor_dynamic; + // NOLINTNEXTLINE(google-build-using-namespace) + using namespace boost::numeric::ublas::index; + + { + auto A = tensor_t(5,3); + auto B = tensor_t{3,4}; + // auto C = tensor_t{4,5,6}; + + for(auto j = 0u; j < A.extents().at(1); ++j){ + for(auto i = 0u; i < A.extents().at(0); ++i){ + A.at( i,j ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } - // std::cout << "A = " << A << std::endl; - // std::cout << "B = " << B << std::endl; - // std::cout << "AB = " << AB << std::endl; + for(auto j = 0u; j < B.extents().at(1); ++j){ + for(auto i = 0u; i < B.extents().at(0); ++i){ + B.at( i,j ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } - for(auto j = 0u; j < AB.extents().at(1); ++j) - for(auto i = 0u; i < AB.extents().at(0); ++i) - BOOST_CHECK_EQUAL( AB.at( i,j ) , value_type(A.at( i,0 ) * ( B.extents().at(0) * (B.extents().at(0)+1) / 2 )) ); + auto AB = A(_,_e) * B(_e,_); + // std::cout << "A = " << A << std::endl; + // std::cout << "B = " << B << std::endl; + // std::cout << "AB = " << AB << std::endl; + for(auto j = 0u; j < AB.extents().at(1); ++j){ + for(auto i = 0u; i < AB.extents().at(0); ++i){ + auto e0 = B.extents().at(0); + auto sum = std::div(e0*(e0+1),2); + auto quot = value_t(sum.quot); + BOOST_CHECK_EQUAL( AB.at(i,j) , A.at(i,0)*quot ); + } } + } - { - auto A = tensor_type{4,5,3}; - auto B = tensor_type{3,4,2}; - - for(auto k = 0u; k < A.extents().at(2); ++k) - for(auto j = 0u; j < A.extents().at(1); ++j) - for(auto i = 0u; i < A.extents().at(0); ++i) - A.at( i,j,k ) = value_type( static_cast< inner_type_t >(i+1) ); + { + auto A = tensor_t{4,5,3}; + auto B = tensor_t{3,4,2}; - for(auto k = 0u; k < B.extents().at(2); ++k) - for(auto j = 0u; j < B.extents().at(1); ++j) - for(auto i = 0u; i < B.extents().at(0); ++i) - B.at( i,j,k ) = value_type( static_cast< inner_type_t >(i+1) ); + for(auto k = 0u; k < A.extents().at(2); ++k){ + for(auto j = 0u; j < A.extents().at(1); ++j){ + for(auto i = 0u; i < 
A.extents().at(0); ++i){ + A.at( i,j,k ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } + } - auto AB = A(_d,_,_f) * B(_f,_d,_); + for(auto k = 0u; k < B.extents().at(2); ++k){ + for(auto j = 0u; j < B.extents().at(1); ++j){ + for(auto i = 0u; i < B.extents().at(0); ++i){ + B.at( i,j,k ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } + } - // std::cout << "A = " << A << std::endl; - // std::cout << "B = " << B << std::endl; - // std::cout << "AB = " << AB << std::endl; - // n*(n+1)/2; - auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); - auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + auto AB = A(_d,_,_f) * B(_f,_d,_); - for(auto j = 0u; j < AB.extents().at(1); ++j) - for(auto i = 0u; i < AB.extents().at(0); ++i) - BOOST_CHECK_EQUAL( AB.at( i,j ) , value_type( static_cast< inner_type_t >(nf * nd) ) ); + // std::cout << "A = " << A << std::endl; + // std::cout << "B = " << B << std::endl; + // std::cout << "AB = " << AB << std::endl; + // n*(n+1)/2; + auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); + auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + for(auto j = 0u; j < AB.extents().at(1); ++j){ + for(auto i = 0u; i < AB.extents().at(0); ++i){ + BOOST_CHECK_EQUAL( AB.at( i,j ) , value_t( static_cast< inner_type_t >(nf * nd) ) ); + } } + } - { - auto A = tensor_type{4,3}; - auto B = tensor_type{3,4,2}; + { + auto A = tensor_t{{4,3}}; + auto B = tensor_t{3,4,2}; - for(auto j = 0u; j < A.extents().at(1); ++j) - for(auto i = 0u; i < A.extents().at(0); ++i) - A.at( i,j ) = value_type( static_cast< inner_type_t >(i+1) ); + for(auto j = 0u; j < A.extents().at(1); ++j){ + for(auto i = 0u; i < A.extents().at(0); ++i){ + A.at( i,j ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } - for(auto k = 0u; k < B.extents().at(2); ++k) - for(auto j = 0u; j < B.extents().at(1); ++j) - for(auto i = 0u; i < B.extents().at(0); ++i) - B.at( i,j,k ) = value_type( static_cast< inner_type_t 
>(i+1) ); - auto AB = A(_d,_f) * B(_f,_d,_); + for(auto k = 0u; k < B.extents().at(2); ++k){ + for(auto j = 0u; j < B.extents().at(1); ++j){ + for(auto i = 0u; i < B.extents().at(0); ++i){ + B.at( i,j,k ) = value_t( static_cast< inner_type_t >(i+1) ); + } + } + } - // n*(n+1)/2; - auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); - auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + auto AB = A(_d,_f) * B(_f,_d,_); - for(auto i = 0u; i < AB.extents().at(0); ++i) - BOOST_CHECK_EQUAL ( AB.at( i ) , value_type( static_cast< inner_type_t >(nf * nd) ) ); + // n*(n+1)/2; + auto const nf = ( B.extents().at(0) * (B.extents().at(0)+1) / 2 ); + auto const nd = ( A.extents().at(0) * (A.extents().at(0)+1) / 2 ); + for(auto i = 0u; i < AB.extents().at(0); ++i){ + BOOST_CHECK_EQUAL ( AB.at( i ) , value_t( static_cast< inner_type_t >(nf * nd) ) ); } + + } } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_expression.cpp b/test/tensor/test_expression.cpp index 64ce969df..3d884e72c 100644 --- a/test/tensor/test_expression.cpp +++ b/test/tensor/test_expression.cpp @@ -11,7 +11,7 @@ - +#include #include #include #include @@ -28,49 +28,53 @@ using test_types = zip>::with_t; - fixture() - : extents { - extents_type{}, // 0 - - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{1,2,3}, // 6 - extents_type{1,1,2,3}, // 7 - extents_type{1,2,3,1,1}, // 8 - - extents_type{4,2,3}, // 9 - extents_type{4,2,1,3}, // 10 - extents_type{4,2,1,3,1}, // 11 - extents_type{1,4,2,1,3,1} } // 12 + + const std::vector extents { - } - std::vector extents; +// extents_type{ }, // 0 + + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{1,2,3}, // 6 + extents_type{1,1,2,3}, // 7 + extents_type{1,2,3,1,1}, // 8 + + extents_type{4,2,3}, // 9 + extents_type{4,2,1,3}, // 10 + 
extents_type{4,2,1,3,1}, // 11 + extents_type{1,4,2,1,3,1} // 12 + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_access, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using tensor_expression_type = typename tensor_type::super_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + using expression_t = typename tensor_t::super_type; for(auto const& e : extents) { - auto v = value_type{}; - auto t = tensor_type(e); + if(!ublas::is_valid(e)){ + continue; + } - for(auto& tt: t){ tt = v; v+=value_type{1}; } - const auto& tensor_expression_const = static_cast( t ); + auto v = value_t{}; + auto t = tensor_t(e); - for(auto i = 0ul; i < t.size(); ++i) + for(auto& tt: t){ tt = v; v+=value_t{1}; } + const auto& tensor_expression_const = static_cast( t ); + + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( tensor_expression_const()(i), t(i) ); + } } } @@ -79,36 +83,39 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_access, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_expression, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); + auto uplus1 = [](auto const& a){ return a+value_t{1}; }; + //auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_t(1) ); for(auto const& e : extents) { - auto t = 
tensor_type(e); - auto v = value_type{}; - for(auto& tt: t) { tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t) { tt = v; v+=value_t{1}; } - const auto uexpr = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + const auto uexpr = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr(i), uplus1(t(i)) ); + } - auto uexpr_uexpr = ublas::detail::make_unary_tensor_expression( uexpr, uplus1 ); + auto uexpr_uexpr = ublas::detail::make_unary_tensor_expression( uexpr, uplus1 ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr_uexpr(i), uplus1(uplus1(t(i))) ); + } const auto & uexpr_e = uexpr.e; - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_e) >, tensor_t > ) ); const auto & uexpr_uexpr_e_e = uexpr_uexpr.e.e; - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_uexpr_e_e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr_uexpr_e_e) >, tensor_t > ) ); } @@ -117,52 +124,58 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_expression, value, test_typ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_expression, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, std::placeholders::_1, value_type(2) ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + + 
auto uplus1 = [](auto const& a){ return a+value_t{1}; }; + auto uplus2 = [](auto const& a){ return a+value_t{2}; }; + //auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_t(1) ); + //auto uplus2 = std::bind( std::plus{}, std::placeholders::_1, value_t(2) ); + auto bplus = std::plus {}; + auto bminus = std::minus{}; for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr1.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr2.e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr1.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(uexpr2.e) >, tensor_t > ) ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr1(i), uplus1(t(i)) ); + } - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( uexpr2(i), uplus2(t(i)) ); + } - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.el.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.er.e) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.el.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_uexpr.er.e) 
>, tensor_t > ) ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( bexpr_uexpr(i), bplus(uexpr1(i),uexpr2(i)) ); + } - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.el.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.er.e) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_type > ) ); - BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_type > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.el.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.el.er.e) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_t > ) ); + BOOST_CHECK( ( std::is_same_v< std::decay_t< decltype(bexpr_bexpr_uexpr.er) >, tensor_t > ) ); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < t.size(); ++i){ BOOST_CHECK_EQUAL( bexpr_bexpr_uexpr(i), bminus(bexpr_uexpr(i),t(i)) ); + } } diff --git a/test/tensor/test_expression_evaluation.cpp b/test/tensor/test_expression_evaluation.cpp index 648134998..5863aa963 100644 --- a/test/tensor/test_expression_evaluation.cpp +++ b/test/tensor/test_expression_evaluation.cpp @@ -12,64 +12,63 @@ + #include #include +#include #include #include "utility.hpp" #include #include -BOOST_AUTO_TEST_SUITE(test_tensor_expression); - +BOOST_AUTO_TEST_SUITE(test_tensor_expression) using test_types = zip>::with_t; - struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents{ - extents_type{}, // 0 - - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - 
- extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{1,2,3}, // 6 - extents_type{1,1,2,3}, // 7 - extents_type{1,2,3,1,1}, // 8 - - extents_type{4,2,3}, // 9 - extents_type{4,2,1,3}, // 10 - extents_type{4,2,1,3,1}, // 11 - extents_type{1,4,2,1,3,1}} // 12 + using extents_t = boost::numeric::ublas::extents<>; + + const std::vector extents = { - } - std::vector extents; +// extents_t{}, // 0 + + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + + extents_t{2,3}, // 4 + extents_t{2,3,1}, // 5 + extents_t{1,2,3}, // 6 + extents_t{1,1,2,3}, // 7 + extents_t{1,2,3,1,1}, // 8 + + extents_t{4,2,3}, // 9 + extents_t{4,2,1,3}, // 10 + extents_t{4,2,1,3,1}, // 11 + extents_t{1,4,2,1,3,1} // 12 + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + + auto uplus1 = [](auto const& a){ return a + value_t(1); }; + auto uplus2 = [](auto const& a){ return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); @@ -77,20 +76,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value // uexpr1 = t+1 
// uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); @@ -100,39 +99,39 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value for(auto i = 0u; i < extents.size()-1; ++i) { - auto v = value_type{}; + auto v = value_t{}; - auto t1 = tensor_type(extents[i]); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + auto t1 = tensor_t(extents[i]); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - auto t2 = tensor_type(extents[i+1]); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + auto t2 = tensor_t(extents[i+1]); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); // uexpr1 = t1+1 // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); 
BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); } @@ -146,22 +145,21 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_retrieve_extents, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; - - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 
= std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + auto uplus1 = [](auto const& a){ return a + value_t(1); }; + auto uplus2 = [](auto const& a){ return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; for(auto const& e : extents) { - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); @@ -169,20 +167,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, valu // uexpr1 = t+1 // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); @@ -192,52 +190,52 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, valu for(auto i = 0u; i < extents.size()-1; ++i) { - auto v = value_type{}; + auto v = value_t{}; - auto t1 = tensor_type(extents[i]); - for(auto& tt: t1){ tt = v; 
v+=value_type{1}; } + auto t1 = tensor_t(extents[i]); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - auto t2 = tensor_type(extents[i+1]); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + auto t2 = tensor_t(extents[i+1]); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); // uexpr1 = t1+1 // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); // bexpr_uexpr2 = (t1+1) + t2 - auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); // bexpr_uexpr2 = ((t1+1) + t2) + t1 - auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) - auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); } diff --git a/test/tensor/test_extents.cpp b/test/tensor/test_extents.cpp deleted file mode 100644 index 2dd9257ff..000000000 --- a/test/tensor/test_extents.cpp +++ /dev/null @@ -1,731 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#include -#include -#include -#include -#include - -BOOST_AUTO_TEST_SUITE ( test_extents ) - - -//*boost::unit_test::label("extents") -//*boost::unit_test::label("constructor") - -BOOST_AUTO_TEST_CASE(test_extents_ctor) -{ - using namespace boost::numeric; - using extents = ublas::basic_extents; - - - auto e0 = extents{}; - BOOST_CHECK( e0.empty()); - BOOST_CHECK ( e0.size() == 0 ); - - auto e1 = extents{1,1}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK ( e1.size() == 2 ); - - auto e2 = extents{1,2}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK ( e2.size() == 2 ); - - auto e3 = extents{2,1}; - BOOST_CHECK (!e3.empty()); - BOOST_CHECK ( e3.size() == 2 ); - - auto e4 = extents{2,3}; - BOOST_CHECK(!e4.empty()); - BOOST_CHECK ( e4.size() == 2 ); - - auto e5 = extents{2,3,1}; - BOOST_CHECK (!e5.empty()); - BOOST_CHECK ( e5.size() == 3 ); - - auto e6 = extents{1,2,3}; // 6 - BOOST_CHECK(!e6.empty()); - BOOST_CHECK ( e6.size() == 3 ); - - auto e7 = extents{4,2,3}; // 7 - BOOST_CHECK(!e7.empty()); - BOOST_CHECK ( e7.size() == 3 ); - - BOOST_CHECK_THROW( extents({1,0}), std::length_error); - BOOST_CHECK_THROW( extents({0} ), std::length_error); - BOOST_CHECK_THROW( extents({3} ), std::length_error); - BOOST_CHECK_THROW( extents({0,1}), std::length_error); -} - -BOOST_AUTO_TEST_CASE(test_static_rank_extents_ctor) -{ - namespace ub = boost::numeric::ublas; - - - auto e0 = ub::extents<0>{}; - BOOST_CHECK( e0.empty()); - BOOST_CHECK ( e0.size() == 0); - - auto e1 = ub::extents<2>{1,1}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK ( e1.size() == 2); - - auto e2 = ub::extents<2>{1,2}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK ( e2.size() == 2); - - auto e3 = ub::extents<2>{2,1}; - BOOST_CHECK (!e3.empty()); - BOOST_CHECK ( e3.size() == 2); - - auto e4 = ub::extents<2>{2,3}; - 
BOOST_CHECK(!e4.empty()); - BOOST_CHECK ( e4.size() == 2); - - auto e5 = ub::extents<3>{2,3,1}; - BOOST_CHECK (!e5.empty()); - BOOST_CHECK ( e5.size() == 3); - - auto e6 = ub::extents<3>{1,2,3}; // 6 - BOOST_CHECK(!e6.empty()); - BOOST_CHECK ( e6.size() == 3); - - auto e7 = ub::extents<3>{4,2,3}; // 7 - BOOST_CHECK(!e7.empty()); - BOOST_CHECK ( e7.size() == 3); - - BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({0} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({3} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::out_of_range); -} - - -struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - template - using static_rank_extents_type = boost::numeric::ublas::extents; - - fixture() : extents{ - extents_type{}, // 0 - - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{1,2,3}, // 6 - extents_type{1,1,2,3}, // 7 - extents_type{1,2,3,1,1}, // 8 - - extents_type{4,2,3}, // 9 - extents_type{4,2,1,3}, // 10 - extents_type{4,2,1,3,1}, // 11 - extents_type{1,4,2,1,3,1}, // 12 - - extents_type{1,4,1}, // 13 - extents_type{1,1,1,1}, // 14 - extents_type{1,4,1,1,1}, // 15 - extents_type{1,1,2,1,1,1}, // 16 - extents_type{1,1,2,3,1,1}, // 17 - } - {} - std::vector extents; -}; - -BOOST_FIXTURE_TEST_CASE(test_extents_access, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("access")) -{ - using namespace boost::numeric; - - BOOST_REQUIRE_EQUAL(extents.size(),18); - - BOOST_CHECK_EQUAL (extents[ 0].size(), 0); - BOOST_CHECK (extents[ 0].empty() ); - - BOOST_REQUIRE_EQUAL(extents[ 1].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 2].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 3].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 4].size(), 2); - BOOST_REQUIRE_EQUAL(extents[ 5].size(), 3); - 
BOOST_REQUIRE_EQUAL(extents[ 6].size(), 3); - BOOST_REQUIRE_EQUAL(extents[ 7].size(), 4); - BOOST_REQUIRE_EQUAL(extents[ 8].size(), 5); - BOOST_REQUIRE_EQUAL(extents[ 9].size(), 3); - BOOST_REQUIRE_EQUAL(extents[10].size(), 4); - BOOST_REQUIRE_EQUAL(extents[11].size(), 5); - BOOST_REQUIRE_EQUAL(extents[12].size(), 6); - BOOST_REQUIRE_EQUAL(extents[13].size(), 3); - BOOST_REQUIRE_EQUAL(extents[14].size(), 4); - BOOST_REQUIRE_EQUAL(extents[15].size(), 5); - BOOST_REQUIRE_EQUAL(extents[16].size(), 6); - BOOST_REQUIRE_EQUAL(extents[17].size(), 6); - - - BOOST_CHECK_EQUAL(extents[1][0],1); - BOOST_CHECK_EQUAL(extents[1][1],1); - - BOOST_CHECK_EQUAL(extents[2][0],1); - BOOST_CHECK_EQUAL(extents[2][1],2); - - BOOST_CHECK_EQUAL(extents[3][0],2); - BOOST_CHECK_EQUAL(extents[3][1],1); - - BOOST_CHECK_EQUAL(extents[4][0],2); - BOOST_CHECK_EQUAL(extents[4][1],3); - - BOOST_CHECK_EQUAL(extents[5][0],2); - BOOST_CHECK_EQUAL(extents[5][1],3); - BOOST_CHECK_EQUAL(extents[5][2],1); - - BOOST_CHECK_EQUAL(extents[6][0],1); - BOOST_CHECK_EQUAL(extents[6][1],2); - BOOST_CHECK_EQUAL(extents[6][2],3); - - BOOST_CHECK_EQUAL(extents[7][0],1); - BOOST_CHECK_EQUAL(extents[7][1],1); - BOOST_CHECK_EQUAL(extents[7][2],2); - BOOST_CHECK_EQUAL(extents[7][3],3); - - BOOST_CHECK_EQUAL(extents[8][0],1); - BOOST_CHECK_EQUAL(extents[8][1],2); - BOOST_CHECK_EQUAL(extents[8][2],3); - BOOST_CHECK_EQUAL(extents[8][3],1); - BOOST_CHECK_EQUAL(extents[8][4],1); - - BOOST_CHECK_EQUAL(extents[9][0],4); - BOOST_CHECK_EQUAL(extents[9][1],2); - BOOST_CHECK_EQUAL(extents[9][2],3); - - BOOST_CHECK_EQUAL(extents[10][0],4); - BOOST_CHECK_EQUAL(extents[10][1],2); - BOOST_CHECK_EQUAL(extents[10][2],1); - BOOST_CHECK_EQUAL(extents[10][3],3); - - BOOST_CHECK_EQUAL(extents[11][0],4); - BOOST_CHECK_EQUAL(extents[11][1],2); - BOOST_CHECK_EQUAL(extents[11][2],1); - BOOST_CHECK_EQUAL(extents[11][3],3); - BOOST_CHECK_EQUAL(extents[11][4],1); - - BOOST_CHECK_EQUAL(extents[12][0],1); - BOOST_CHECK_EQUAL(extents[12][1],4); - 
BOOST_CHECK_EQUAL(extents[12][2],2); - BOOST_CHECK_EQUAL(extents[12][3],1); - BOOST_CHECK_EQUAL(extents[12][4],3); - BOOST_CHECK_EQUAL(extents[12][5],1); - - BOOST_CHECK_EQUAL(extents[13][0],1); - BOOST_CHECK_EQUAL(extents[13][1],4); - BOOST_CHECK_EQUAL(extents[13][2],1); - - BOOST_CHECK_EQUAL(extents[14][0],1); - BOOST_CHECK_EQUAL(extents[14][1],1); - BOOST_CHECK_EQUAL(extents[14][2],1); - BOOST_CHECK_EQUAL(extents[14][3],1); - - BOOST_CHECK_EQUAL(extents[15][0],1); - BOOST_CHECK_EQUAL(extents[15][1],4); - BOOST_CHECK_EQUAL(extents[15][2],1); - BOOST_CHECK_EQUAL(extents[15][3],1); - BOOST_CHECK_EQUAL(extents[15][4],1); - - BOOST_CHECK_EQUAL(extents[16][0],1); - BOOST_CHECK_EQUAL(extents[16][1],1); - BOOST_CHECK_EQUAL(extents[16][2],2); - BOOST_CHECK_EQUAL(extents[16][3],1); - BOOST_CHECK_EQUAL(extents[16][4],1); - BOOST_CHECK_EQUAL(extents[16][5],1); - - BOOST_CHECK_EQUAL(extents[17][0],1); - BOOST_CHECK_EQUAL(extents[17][1],1); - BOOST_CHECK_EQUAL(extents[17][2],2); - BOOST_CHECK_EQUAL(extents[17][3],3); - BOOST_CHECK_EQUAL(extents[17][4],1); - BOOST_CHECK_EQUAL(extents[17][5],1); -} - -BOOST_FIXTURE_TEST_CASE(test_extents_copy_ctor, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("copy_ctor")) -{ - BOOST_REQUIRE_EQUAL(extents.size(),18); - - auto e0 = extents[ 0]; // {} - auto e1 = extents[ 1]; // {1,1} - auto e2 = extents[ 2]; // {1,2} - auto e3 = extents[ 3]; // {2,1} - auto e4 = extents[ 4]; // {2,3} - auto e5 = extents[ 5]; // {2,3,1} - auto e6 = extents[ 6]; // {1,2,3} - auto e7 = extents[ 7]; // {1,1,2,3} - auto e8 = extents[ 8]; // {1,2,3,1,1} - auto e9 = extents[ 9]; // {4,2,3} - auto e10 = extents[10]; // {4,2,1,3} - auto e11 = extents[11]; // {4,2,1,3,1} - auto e12 = extents[12]; // {1,4,2,1,3,1} - auto e13 = extents[13]; // {1,4,1} - auto e14 = extents[14]; // {1,1,1,1} - auto e15 = extents[15]; // {1,4,1,1,1} - auto e16 = extents[16]; // {1,1,2,1,1,1} - auto e17 = extents[17]; // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL (e0.size(), 
0); - BOOST_CHECK (e0.empty() ); - - BOOST_REQUIRE_EQUAL(e1 .size(), 2); - BOOST_REQUIRE_EQUAL(e2 .size(), 2); - BOOST_REQUIRE_EQUAL(e3 .size(), 2); - BOOST_REQUIRE_EQUAL(e4 .size(), 2); - BOOST_REQUIRE_EQUAL(e5 .size(), 3); - BOOST_REQUIRE_EQUAL(e6 .size(), 3); - BOOST_REQUIRE_EQUAL(e7 .size(), 4); - BOOST_REQUIRE_EQUAL(e8 .size(), 5); - BOOST_REQUIRE_EQUAL(e9 .size(), 3); - BOOST_REQUIRE_EQUAL(e10.size(), 4); - BOOST_REQUIRE_EQUAL(e11.size(), 5); - BOOST_REQUIRE_EQUAL(e12.size(), 6); - BOOST_REQUIRE_EQUAL(e13.size(), 3); - BOOST_REQUIRE_EQUAL(e14.size(), 4); - BOOST_REQUIRE_EQUAL(e15.size(), 5); - BOOST_REQUIRE_EQUAL(e16.size(), 6); - BOOST_REQUIRE_EQUAL(e17.size(), 6); - - - BOOST_CHECK_EQUAL(e1[0],1); - BOOST_CHECK_EQUAL(e1[1],1); - - BOOST_CHECK_EQUAL(e2[0],1); - BOOST_CHECK_EQUAL(e2[1],2); - - BOOST_CHECK_EQUAL(e3[0],2); - BOOST_CHECK_EQUAL(e3[1],1); - - BOOST_CHECK_EQUAL(e4[0],2); - BOOST_CHECK_EQUAL(e4[1],3); - - BOOST_CHECK_EQUAL(e5[0],2); - BOOST_CHECK_EQUAL(e5[1],3); - BOOST_CHECK_EQUAL(e5[2],1); - - BOOST_CHECK_EQUAL(e6[0],1); - BOOST_CHECK_EQUAL(e6[1],2); - BOOST_CHECK_EQUAL(e6[2],3); - - BOOST_CHECK_EQUAL(e7[0],1); - BOOST_CHECK_EQUAL(e7[1],1); - BOOST_CHECK_EQUAL(e7[2],2); - BOOST_CHECK_EQUAL(e7[3],3); - - BOOST_CHECK_EQUAL(e8[0],1); - BOOST_CHECK_EQUAL(e8[1],2); - BOOST_CHECK_EQUAL(e8[2],3); - BOOST_CHECK_EQUAL(e8[3],1); - BOOST_CHECK_EQUAL(e8[4],1); - - BOOST_CHECK_EQUAL(e9[0],4); - BOOST_CHECK_EQUAL(e9[1],2); - BOOST_CHECK_EQUAL(e9[2],3); - - BOOST_CHECK_EQUAL(e10[0],4); - BOOST_CHECK_EQUAL(e10[1],2); - BOOST_CHECK_EQUAL(e10[2],1); - BOOST_CHECK_EQUAL(e10[3],3); - - BOOST_CHECK_EQUAL(e11[0],4); - BOOST_CHECK_EQUAL(e11[1],2); - BOOST_CHECK_EQUAL(e11[2],1); - BOOST_CHECK_EQUAL(e11[3],3); - BOOST_CHECK_EQUAL(e11[4],1); - - BOOST_CHECK_EQUAL(e12[0],1); - BOOST_CHECK_EQUAL(e12[1],4); - BOOST_CHECK_EQUAL(e12[2],2); - BOOST_CHECK_EQUAL(e12[3],1); - BOOST_CHECK_EQUAL(e12[4],3); - BOOST_CHECK_EQUAL(e12[5],1); - - BOOST_CHECK_EQUAL(e13[0],1); - 
BOOST_CHECK_EQUAL(e13[1],4); - BOOST_CHECK_EQUAL(e13[2],1); - - BOOST_CHECK_EQUAL(e14[0],1); - BOOST_CHECK_EQUAL(e14[1],1); - BOOST_CHECK_EQUAL(e14[2],1); - BOOST_CHECK_EQUAL(e14[3],1); - - BOOST_CHECK_EQUAL(e15[0],1); - BOOST_CHECK_EQUAL(e15[1],4); - BOOST_CHECK_EQUAL(e15[2],1); - BOOST_CHECK_EQUAL(e15[3],1); - BOOST_CHECK_EQUAL(e15[4],1); - - BOOST_CHECK_EQUAL(e16[0],1); - BOOST_CHECK_EQUAL(e16[1],1); - BOOST_CHECK_EQUAL(e16[2],2); - BOOST_CHECK_EQUAL(e16[3],1); - BOOST_CHECK_EQUAL(e16[4],1); - BOOST_CHECK_EQUAL(e16[5],1); - - BOOST_CHECK_EQUAL(e17[0],1); - BOOST_CHECK_EQUAL(e17[1],1); - BOOST_CHECK_EQUAL(e17[2],2); - BOOST_CHECK_EQUAL(e17[3],3); - BOOST_CHECK_EQUAL(e17[4],1); - BOOST_CHECK_EQUAL(e17[5],1); - -} - -BOOST_FIXTURE_TEST_CASE(test_extents_is, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("query")) -{ - BOOST_REQUIRE_EQUAL(extents.size(),18); - - auto e0 = extents[ 0]; // {} - auto e1 = extents[ 1]; // {1,1} - auto e2 = extents[ 2]; // {1,2} - auto e3 = extents[ 3]; // {2,1} - auto e4 = extents[ 4]; // {2,3} - auto e5 = extents[ 5]; // {2,3,1} - auto e6 = extents[ 6]; // {1,2,3} - auto e7 = extents[ 7]; // {1,1,2,3} - auto e8 = extents[ 8]; // {1,2,3,1,1} - auto e9 = extents[ 9]; // {4,2,3} - auto e10 = extents[10]; // {4,2,1,3} - auto e11 = extents[11]; // {4,2,1,3,1} - auto e12 = extents[12]; // {1,4,2,1,3,1} - auto e13 = extents[13]; // {1,4,1} - auto e14 = extents[14]; // {1,1,1,1} - auto e15 = extents[15]; // {1,4,1,1,1} - auto e16 = extents[16]; // {1,1,2,1,1,1} - auto e17 = extents[17]; // {1,1,2,3,1,1} - - BOOST_CHECK( e0.empty ( )); - BOOST_CHECK( ! is_scalar(e0)); - BOOST_CHECK( ! is_vector(e0)); - BOOST_CHECK( ! is_matrix(e0)); - BOOST_CHECK( ! is_tensor(e0)); - - BOOST_CHECK( ! e1.empty ( ) ); - BOOST_CHECK( is_scalar(e1) ); - BOOST_CHECK( ! is_vector(e1) ); - BOOST_CHECK( ! is_matrix(e1) ); - BOOST_CHECK( ! is_tensor(e1) ); - - BOOST_CHECK( ! e2.empty ( ) ); - BOOST_CHECK( ! 
is_scalar(e2) ); - BOOST_CHECK( is_vector(e2) ); - BOOST_CHECK( ! is_matrix(e2) ); - BOOST_CHECK( ! is_tensor(e2) ); - - BOOST_CHECK( ! e3.empty ( ) ); - BOOST_CHECK( ! is_scalar(e3) ); - BOOST_CHECK( is_vector(e3) ); - BOOST_CHECK( ! is_matrix(e3) ); - BOOST_CHECK( ! is_tensor(e3) ); - - BOOST_CHECK( ! e4.empty ( ) ); - BOOST_CHECK( ! is_scalar(e4) ); - BOOST_CHECK( ! is_vector(e4) ); - BOOST_CHECK( is_matrix(e4) ); - BOOST_CHECK( ! is_tensor(e4) ); - - BOOST_CHECK( ! e5.empty ( ) ); - BOOST_CHECK( ! is_scalar(e5) ); - BOOST_CHECK( ! is_vector(e5) ); - BOOST_CHECK( is_matrix(e5) ); - BOOST_CHECK( ! is_tensor(e5) ); - - BOOST_CHECK( ! e6.empty ( ) ); - BOOST_CHECK( ! is_scalar(e6) ); - BOOST_CHECK( ! is_vector(e6) ); - BOOST_CHECK( ! is_matrix(e6) ); - BOOST_CHECK( is_tensor(e6) ); - - BOOST_CHECK( ! e7.empty ( ) ); - BOOST_CHECK( ! is_scalar(e7) ); - BOOST_CHECK( ! is_vector(e7) ); - BOOST_CHECK( ! is_matrix(e7) ); - BOOST_CHECK( is_tensor(e7) ); - - BOOST_CHECK( ! e8.empty ( ) ); - BOOST_CHECK( ! is_scalar(e8) ); - BOOST_CHECK( ! is_vector(e8) ); - BOOST_CHECK( ! is_matrix(e8) ); - BOOST_CHECK( is_tensor(e8) ); - - BOOST_CHECK( ! e9.empty ( ) ); - BOOST_CHECK( ! is_scalar(e9) ); - BOOST_CHECK( ! is_vector(e9) ); - BOOST_CHECK( ! is_matrix(e9) ); - BOOST_CHECK( is_tensor(e9) ); - - BOOST_CHECK( ! e10.empty( ) ); - BOOST_CHECK( ! is_scalar(e10) ); - BOOST_CHECK( ! is_vector(e10) ); - BOOST_CHECK( ! is_matrix(e10) ); - BOOST_CHECK( is_tensor(e10) ); - - BOOST_CHECK( ! e11.empty( ) ); - BOOST_CHECK( ! is_scalar(e11) ); - BOOST_CHECK( ! is_vector(e11) ); - BOOST_CHECK( ! is_matrix(e11) ); - BOOST_CHECK( is_tensor(e11) ); - - BOOST_CHECK( ! e12.empty( ) ); - BOOST_CHECK( ! is_scalar(e12) ); - BOOST_CHECK( ! is_vector(e12) ); - BOOST_CHECK( ! is_matrix(e12) ); - BOOST_CHECK( is_tensor(e12) ); - - BOOST_CHECK( ! e13.empty( ) ); - BOOST_CHECK( ! is_scalar(e13) ); - BOOST_CHECK( is_vector(e13) ); - BOOST_CHECK( ! is_matrix(e13) ); - BOOST_CHECK( ! 
is_tensor(e13) ); - - BOOST_CHECK( ! e14.empty( ) ); - BOOST_CHECK( is_scalar(e14) ); - BOOST_CHECK( ! is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - - BOOST_CHECK( ! e15.empty( ) ); - BOOST_CHECK( ! is_scalar(e15) ); - BOOST_CHECK( is_vector(e15) ); - BOOST_CHECK( ! is_matrix(e15) ); - BOOST_CHECK( ! is_tensor(e15) ); - - BOOST_CHECK( ! e16.empty( ) ); - BOOST_CHECK( ! is_scalar(e16) ); - BOOST_CHECK( ! is_vector(e16) ); - BOOST_CHECK( ! is_matrix(e16) ); - BOOST_CHECK( is_tensor(e16) ); - - BOOST_CHECK( ! e17.empty( ) ); - BOOST_CHECK( ! is_scalar(e17) ); - BOOST_CHECK( ! is_vector(e17) ); - BOOST_CHECK( ! is_matrix(e17) ); - BOOST_CHECK( is_tensor(e17) ); -} - -BOOST_FIXTURE_TEST_CASE(test_extents_squeeze, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("squeeze")) -{ - BOOST_REQUIRE_EQUAL(extents.size(),18); - - auto e0 = squeeze(extents[ 0]); // {} - auto e1 = squeeze(extents[ 1]); // {1,1} - auto e2 = squeeze(extents[ 2]); // {1,2} - auto e3 = squeeze(extents[ 3]); // {2,1} - - auto e4 = squeeze(extents[ 4]); // {2,3} - auto e5 = squeeze(extents[ 5]); // {2,3} - auto e6 = squeeze(extents[ 6]); // {2,3} - auto e7 = squeeze(extents[ 7]); // {2,3} - auto e8 = squeeze(extents[ 8]); // {2,3} - - auto e9 = squeeze(extents[ 9]); // {4,2,3} - auto e10 = squeeze(extents[10]); // {4,2,3} - auto e11 = squeeze(extents[11]); // {4,2,3} - auto e12 = squeeze(extents[12]); // {4,2,3} - - auto e13 = squeeze(extents[13]); // {1,4} - auto e14 = squeeze(extents[14]); // {1,1} - auto e15 = squeeze(extents[15]); // {1,4} - auto e16 = squeeze(extents[16]); // {2,1} - auto e17 = squeeze(extents[17]); // {2,3} - - BOOST_CHECK( (e0 == extents_type{} ) ); - BOOST_CHECK( (e1 == extents_type{1,1}) ); - BOOST_CHECK( (e2 == extents_type{1,2}) ); - BOOST_CHECK( (e3 == extents_type{2,1}) ); - - BOOST_CHECK( (e4 == extents_type{2,3}) ); - BOOST_CHECK( (e5 == extents_type{2,3}) ); - BOOST_CHECK( (e6 == extents_type{2,3}) ); - 
BOOST_CHECK( (e7 == extents_type{2,3}) ); - BOOST_CHECK( (e8 == extents_type{2,3}) ); - - BOOST_CHECK( (e9 == extents_type{4,2,3}) ); - BOOST_CHECK( (e10 == extents_type{4,2,3}) ); - BOOST_CHECK( (e11 == extents_type{4,2,3}) ); - BOOST_CHECK( (e12 == extents_type{4,2,3}) ); - - BOOST_CHECK( (e13 == extents_type{1,4}) ); - BOOST_CHECK( (e14 == extents_type{1,1}) ); - BOOST_CHECK( (e15 == extents_type{1,4}) ); - BOOST_CHECK( (e16 == extents_type{2,1}) ); - BOOST_CHECK( (e17 == extents_type{2,3}) ); - -} - -BOOST_FIXTURE_TEST_CASE(test_extents_comparison, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("compare")) -{ - - using namespace boost::numeric; - - auto s0 = ublas::static_extents<>{}; - auto s1 = ublas::static_extents<1,1>{}; - auto s2 = ublas::static_extents<1,4,2,1,3,1>{}; - auto s3 = ublas::static_extents<1,4,2,1,1,1>{}; - - auto d0 = ublas::extents<0>{}; - auto d1 = ublas::extents<2>{1,1}; - auto d2 = ublas::extents<6>{1,4,2,1,3,1}; - auto d3 = ublas::extents<6>{1,4,2,1,1,1}; - - auto e0 = extents[ 0]; // {} - auto e1 = extents[ 1]; // {1,1} - auto e2 = extents[12]; // {1,4,2,1,3,1} - - // static_extents<...> == extents<> - BOOST_TEST( s0 == e0 ); - BOOST_TEST( s1 == e1 ); - BOOST_TEST( s2 == e2 ); - - BOOST_TEST( e0 == s0 ); - BOOST_TEST( e1 == s1 ); - BOOST_TEST( e2 == s2 ); - - BOOST_TEST( s0 != e1 ); - BOOST_TEST( s0 != e2 ); - BOOST_TEST( s1 != e0 ); - BOOST_TEST( s1 != e2 ); - BOOST_TEST( s2 != e0 ); - BOOST_TEST( s2 != e1 ); - BOOST_TEST( s3 != e0 ); - BOOST_TEST( s3 != e1 ); - BOOST_TEST( s3 != e2 ); - - BOOST_TEST( e1 != s0 ); - BOOST_TEST( e2 != s0 ); - BOOST_TEST( e0 != s1 ); - BOOST_TEST( e2 != s1 ); - BOOST_TEST( e0 != s2 ); - BOOST_TEST( e1 != s2 ); - BOOST_TEST( e0 != s3 ); - BOOST_TEST( e1 != s3 ); - BOOST_TEST( e2 != s3 ); - - // extents == extents<> - BOOST_TEST( d0 == e0 ); - BOOST_TEST( d1 == e1 ); - BOOST_TEST( d2 == e2 ); - - BOOST_TEST( e0 == d0 ); - BOOST_TEST( e1 == d1 ); - BOOST_TEST( e2 == d2 ); - - 
BOOST_TEST( d0 != e1 ); - BOOST_TEST( d0 != e2 ); - BOOST_TEST( d1 != e0 ); - BOOST_TEST( d1 != e2 ); - BOOST_TEST( d2 != e0 ); - BOOST_TEST( d2 != e1 ); - BOOST_TEST( d3 != e0 ); - BOOST_TEST( d3 != e1 ); - BOOST_TEST( d3 != e2 ); - - BOOST_TEST( e1 != d0 ); - BOOST_TEST( e2 != d0 ); - BOOST_TEST( e0 != d1 ); - BOOST_TEST( e2 != d1 ); - BOOST_TEST( e0 != d2 ); - BOOST_TEST( e1 != d2 ); - BOOST_TEST( e0 != d3 ); - BOOST_TEST( e1 != d3 ); - BOOST_TEST( e2 != d3 ); - - // static_extents<...> == extents - - BOOST_TEST( s0 == d0 ); - BOOST_TEST( s1 == d1 ); - BOOST_TEST( s2 == d2 ); - BOOST_TEST( s3 == d3 ); - - BOOST_TEST( d0 == s0 ); - BOOST_TEST( d1 == s1 ); - BOOST_TEST( d2 == s2 ); - BOOST_TEST( d3 == s3 ); - - BOOST_TEST( s0 != d1 ); - BOOST_TEST( s0 != d2 ); - BOOST_TEST( s0 != d3 ); - BOOST_TEST( s1 != d0 ); - BOOST_TEST( s1 != d2 ); - BOOST_TEST( s1 != d3 ); - BOOST_TEST( s2 != d0 ); - BOOST_TEST( s2 != d1 ); - BOOST_TEST( s2 != d3 ); - BOOST_TEST( s3 != d0 ); - BOOST_TEST( s3 != d1 ); - BOOST_TEST( s3 != d2 ); - - BOOST_TEST( d1 != s0 ); - BOOST_TEST( d2 != s0 ); - BOOST_TEST( d3 != s0 ); - BOOST_TEST( d0 != s1 ); - BOOST_TEST( d2 != s1 ); - BOOST_TEST( d3 != s1 ); - BOOST_TEST( d0 != s2 ); - BOOST_TEST( d1 != s2 ); - BOOST_TEST( d3 != s2 ); - BOOST_TEST( d0 != s3 ); - BOOST_TEST( d1 != s3 ); - BOOST_TEST( d2 != s3 ); - -} - - -BOOST_FIXTURE_TEST_CASE(test_extents_product, fixture, *boost::unit_test::label("extents") *boost::unit_test::label("product")) -{ - - auto e0 = product(extents[ 0]); // {} - auto e1 = product(extents[ 1]); // {1,1} - auto e2 = product(extents[ 2]); // {1,2} - auto e3 = product(extents[ 3]); // {2,1} - auto e4 = product(extents[ 4]); // {2,3} - auto e5 = product(extents[ 5]); // {2,3,1} - auto e6 = product(extents[ 6]); // {1,2,3} - auto e7 = product(extents[ 7]); // {1,1,2,3} - auto e8 = product(extents[ 8]); // {1,2,3,1,1} - auto e9 = product(extents[ 9]); // {4,2,3} - auto e10 = product(extents[10]); // {4,2,1,3} - auto e11 = 
product(extents[11]); // {4,2,1,3,1} - auto e12 = product(extents[12]); // {1,4,2,1,3,1} - auto e13 = product(extents[13]); // {1,4,1} - auto e14 = product(extents[14]); // {1,1,1,1} - auto e15 = product(extents[15]); // {1,4,1,1,1} - auto e16 = product(extents[16]); // {1,1,2,1,1,1} - auto e17 = product(extents[17]); // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL( e0 , 0 ); - BOOST_CHECK_EQUAL( e1 , 1 ); - BOOST_CHECK_EQUAL( e2 , 2 ); - BOOST_CHECK_EQUAL( e3 , 2 ); - BOOST_CHECK_EQUAL( e4 , 6 ); - BOOST_CHECK_EQUAL( e5 , 6 ); - BOOST_CHECK_EQUAL( e6 , 6 ); - BOOST_CHECK_EQUAL( e7 , 6 ); - BOOST_CHECK_EQUAL( e8 , 6 ); - BOOST_CHECK_EQUAL( e9 , 24 ); - BOOST_CHECK_EQUAL( e10, 24 ); - BOOST_CHECK_EQUAL( e11, 24 ); - BOOST_CHECK_EQUAL( e12, 24 ); - BOOST_CHECK_EQUAL( e13, 4 ); - BOOST_CHECK_EQUAL( e14, 1 ); - BOOST_CHECK_EQUAL( e15, 4 ); - BOOST_CHECK_EQUAL( e16, 2 ); - BOOST_CHECK_EQUAL( e17, 6 ); - - -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_extents_dynamic.cpp b/test/tensor/test_extents_dynamic.cpp new file mode 100644 index 000000000..fe4761b88 --- /dev/null +++ b/test/tensor/test_extents_dynamic.cpp @@ -0,0 +1,190 @@ +// +// Copyright (c) 2018 Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#include +#include + +BOOST_AUTO_TEST_SUITE ( test_extents_dynamic ) + + +struct fixture +{ + using extents = boost::numeric::ublas::extents<>; + +// static inline auto n = extents{}; + static inline auto n1 = extents{1}; + static inline auto n2 = extents{2}; + static inline auto n11 = extents{1,1}; + static inline auto n12 = extents{1,2}; + static inline auto n21 = extents{2,1}; + static inline auto n22 = extents{2,2}; + static inline auto n32 = extents{3,2}; + static inline auto n111 = extents{1,1,1}; + static inline auto n211 = extents{2,1,1}; + static inline auto n121 = extents{1,2,1}; + static inline auto n112 = extents{1,1,2}; + static inline auto n123 = extents{1,2,3}; + static inline auto n321 = extents{3,2,1}; + static inline auto n213 = extents{2,1,3}; + static inline auto n432 = extents{4,3,2}; +}; + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_empty, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("empty")) +{ + namespace ublas = boost::numeric::ublas; + +// BOOST_CHECK( ublas::empty(n )); + BOOST_CHECK(!ublas::empty(n1 )); + BOOST_CHECK(!ublas::empty(n2 )); + BOOST_CHECK(!ublas::empty(n11 )); + BOOST_CHECK(!ublas::empty(n12 )); + BOOST_CHECK(!ublas::empty(n21 )); + BOOST_CHECK(!ublas::empty(n22 )); + BOOST_CHECK(!ublas::empty(n32 )); + BOOST_CHECK(!ublas::empty(n111)); + BOOST_CHECK(!ublas::empty(n211)); + BOOST_CHECK(!ublas::empty(n121)); + BOOST_CHECK(!ublas::empty(n112)); + BOOST_CHECK(!ublas::empty(n123)); + BOOST_CHECK(!ublas::empty(n321)); + BOOST_CHECK(!ublas::empty(n213)); + BOOST_CHECK(!ublas::empty(n432)); + + BOOST_CHECK_THROW( extents({1,1,0}), std::invalid_argument); + BOOST_CHECK_THROW( extents({1,0}) , std::invalid_argument); + BOOST_CHECK_THROW( extents({0} ) , std::invalid_argument); + 
BOOST_CHECK_THROW( extents({0,1}) , std::invalid_argument); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_size, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("size")) +{ + namespace ublas = boost::numeric::ublas; + + +// BOOST_CHECK_EQUAL(ublas::size(n ),0); + BOOST_CHECK_EQUAL(ublas::size(n1 ),1); + BOOST_CHECK_EQUAL(ublas::size(n2 ),1); + BOOST_CHECK_EQUAL(ublas::size(n11 ),2); + BOOST_CHECK_EQUAL(ublas::size(n12 ),2); + BOOST_CHECK_EQUAL(ublas::size(n21 ),2); + BOOST_CHECK_EQUAL(ublas::size(n22 ),2); + BOOST_CHECK_EQUAL(ublas::size(n32 ),2); + BOOST_CHECK_EQUAL(ublas::size(n111),3); + BOOST_CHECK_EQUAL(ublas::size(n211),3); + BOOST_CHECK_EQUAL(ublas::size(n121),3); + BOOST_CHECK_EQUAL(ublas::size(n112),3); + BOOST_CHECK_EQUAL(ublas::size(n123),3); + BOOST_CHECK_EQUAL(ublas::size(n321),3); + BOOST_CHECK_EQUAL(ublas::size(n213),3); + BOOST_CHECK_EQUAL(ublas::size(n432),3); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_at_read, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("at_read")) +{ + BOOST_CHECK_EQUAL(n1 .at(0),1); + BOOST_CHECK_EQUAL(n2 .at(0),2); + + BOOST_CHECK_EQUAL(n11 .at(0),1); + BOOST_CHECK_EQUAL(n11 .at(1),1); + + BOOST_CHECK_EQUAL(n12 .at(0),1); + BOOST_CHECK_EQUAL(n12 .at(1),2); + + BOOST_CHECK_EQUAL(n21 .at(0),2); + BOOST_CHECK_EQUAL(n21 .at(1),1); + + BOOST_CHECK_EQUAL(n22 .at(0),2); + BOOST_CHECK_EQUAL(n22 .at(1),2); + + BOOST_CHECK_EQUAL(n32 .at(0),3); + BOOST_CHECK_EQUAL(n32 .at(1),2); + + BOOST_CHECK_EQUAL(n432.at(0),4); + BOOST_CHECK_EQUAL(n432.at(1),3); + BOOST_CHECK_EQUAL(n432.at(2),2); + + +// BOOST_CHECK_THROW( (void)n .at(0), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(2), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(5), std::out_of_range); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_at_write, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("at_write")) +{ + auto n3 = extents{1}; + n3 = 
extents{3}; + BOOST_CHECK_EQUAL(n3.at(0),3); + + auto n34 = extents{1,1}; + n34 = extents{3,4}; + BOOST_CHECK_EQUAL(n34.at(0),3); + BOOST_CHECK_EQUAL(n34.at(1),4); + + + auto n345 = extents{1,1,1}; + n345 = extents{3,4,5}; + BOOST_CHECK_EQUAL(n345.at(0),3); + BOOST_CHECK_EQUAL(n345.at(1),4); + BOOST_CHECK_EQUAL(n345.at(2),5); + + + auto n5432 = extents{1,1,1,1}; + n5432 = extents{5,4,3,2}; + BOOST_CHECK_EQUAL(n5432.at(0),5); + BOOST_CHECK_EQUAL(n5432.at(1),4); + BOOST_CHECK_EQUAL(n5432.at(2),3); + BOOST_CHECK_EQUAL(n5432.at(3),2); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_operator_access_read, + fixture, + *boost::unit_test::label("dynamic_extents") *boost::unit_test::label("operator_access_read")) +{ + BOOST_CHECK_EQUAL(n1 [0],1); + BOOST_CHECK_EQUAL(n2 [0],2); + + BOOST_CHECK_EQUAL(n11 [0],1); + BOOST_CHECK_EQUAL(n11 [1],1); + + BOOST_CHECK_EQUAL(n12 [0],1); + BOOST_CHECK_EQUAL(n12 [1],2); + + BOOST_CHECK_EQUAL(n21 [0],2); + BOOST_CHECK_EQUAL(n21 [1],1); + + BOOST_CHECK_EQUAL(n22 [0],2); + BOOST_CHECK_EQUAL(n22 [1],2); + + BOOST_CHECK_EQUAL(n32 [0],3); + BOOST_CHECK_EQUAL(n32 [1],2); + + BOOST_CHECK_EQUAL(n432[0],4); + BOOST_CHECK_EQUAL(n432[1],3); + BOOST_CHECK_EQUAL(n432[2],2); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_extents_dynamic_rank_static.cpp b/test/tensor/test_extents_dynamic_rank_static.cpp new file mode 100644 index 000000000..c7d4a6a9d --- /dev/null +++ b/test/tensor/test_extents_dynamic_rank_static.cpp @@ -0,0 +1,155 @@ +// +// Copyright (c) 2021 Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + +#include +#include + +BOOST_AUTO_TEST_SUITE ( test_shape_dynamic_static_rank ) + + +struct fixture +{ + template + using shape_t = boost::numeric::ublas::extents; + +// static inline auto n = shape_t<0>{}; + static inline auto n1 = shape_t<1>{1}; + static inline auto n2 = shape_t<1>{2}; + static inline auto n11 = shape_t<2>{1,1}; + static inline auto n12 = shape_t<2>{1,2}; + static inline auto n21 = shape_t<2>{2,1}; + static inline auto n22 = shape_t<2>{2,2}; + static inline auto n32 = shape_t<2>{3,2}; + static inline auto n111 = shape_t<3>{1,1,1}; + static inline auto n211 = shape_t<3>{2,1,1}; + static inline auto n121 = shape_t<3>{1,2,1}; + static inline auto n112 = shape_t<3>{1,1,2}; + static inline auto n123 = shape_t<3>{1,2,3}; + static inline auto n321 = shape_t<3>{3,2,1}; + static inline auto n213 = shape_t<3>{2,1,3}; + static inline auto n432 = shape_t<3>{4,3,2}; +}; + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_empty, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("empty")) +{ + namespace ublas = boost::numeric::ublas; +// BOOST_CHECK( ublas::empty(n )); + BOOST_CHECK(!ublas::empty(n1 )); + BOOST_CHECK(!ublas::empty(n2 )); + BOOST_CHECK(!ublas::empty(n11 )); + BOOST_CHECK(!ublas::empty(n12 )); + BOOST_CHECK(!ublas::empty(n21 )); + BOOST_CHECK(!ublas::empty(n22 )); + BOOST_CHECK(!ublas::empty(n32 )); + BOOST_CHECK(!ublas::empty(n111)); + BOOST_CHECK(!ublas::empty(n211)); + BOOST_CHECK(!ublas::empty(n121)); + BOOST_CHECK(!ublas::empty(n112)); + BOOST_CHECK(!ublas::empty(n123)); + BOOST_CHECK(!ublas::empty(n321)); + BOOST_CHECK(!ublas::empty(n213)); + BOOST_CHECK(!ublas::empty(n432)); + + BOOST_CHECK_THROW( shape_t<3>({1,1,0}), std::invalid_argument); + BOOST_CHECK_THROW( shape_t<2>({1,0}), std::invalid_argument); + BOOST_CHECK_THROW( shape_t<1>({0} ), std::invalid_argument); + BOOST_CHECK_THROW( 
shape_t<2>({0,1}), std::invalid_argument); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_size, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("size")) +{ + namespace ublas = boost::numeric::ublas; + +// BOOST_CHECK_EQUAL(ublas::size(n ),0); + BOOST_CHECK_EQUAL(ublas::size(n1 ),1); + BOOST_CHECK_EQUAL(ublas::size(n2 ),1); + BOOST_CHECK_EQUAL(ublas::size(n11 ),2); + BOOST_CHECK_EQUAL(ublas::size(n12 ),2); + BOOST_CHECK_EQUAL(ublas::size(n21 ),2); + BOOST_CHECK_EQUAL(ublas::size(n22 ),2); + BOOST_CHECK_EQUAL(ublas::size(n32 ),2); + BOOST_CHECK_EQUAL(ublas::size(n111),3); + BOOST_CHECK_EQUAL(ublas::size(n211),3); + BOOST_CHECK_EQUAL(ublas::size(n121),3); + BOOST_CHECK_EQUAL(ublas::size(n112),3); + BOOST_CHECK_EQUAL(ublas::size(n123),3); + BOOST_CHECK_EQUAL(ublas::size(n321),3); + BOOST_CHECK_EQUAL(ublas::size(n213),3); + BOOST_CHECK_EQUAL(ublas::size(n432),3); +} + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_at_read, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") *boost::unit_test::label("at_read")) +{ + BOOST_CHECK_EQUAL(n1 .at(0),1); + BOOST_CHECK_EQUAL(n2 .at(0),2); + + BOOST_CHECK_EQUAL(n11 .at(0),1); + BOOST_CHECK_EQUAL(n11 .at(1),1); + + BOOST_CHECK_EQUAL(n12 .at(0),1); + BOOST_CHECK_EQUAL(n12 .at(1),2); + + BOOST_CHECK_EQUAL(n21 .at(0),2); + BOOST_CHECK_EQUAL(n21 .at(1),1); + + BOOST_CHECK_EQUAL(n22 .at(0),2); + BOOST_CHECK_EQUAL(n22 .at(1),2); + + BOOST_CHECK_EQUAL(n32 .at(0),3); + BOOST_CHECK_EQUAL(n32 .at(1),2); + + BOOST_CHECK_EQUAL(n432.at(0),4); + BOOST_CHECK_EQUAL(n432.at(1),3); + BOOST_CHECK_EQUAL(n432.at(2),2); + + +// BOOST_CHECK_THROW( (void)n .at(0), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(2), std::out_of_range); + BOOST_CHECK_THROW( (void)n32.at(5), std::out_of_range); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_static_operator_access_read, + fixture, + *boost::unit_test::label("dynamic_extents_rank_static") 
*boost::unit_test::label("operator_access_read")) +{ + BOOST_CHECK_EQUAL(n1 [0],1); + BOOST_CHECK_EQUAL(n2 [0],2); + + BOOST_CHECK_EQUAL(n11 [0],1); + BOOST_CHECK_EQUAL(n11 [1],1); + + BOOST_CHECK_EQUAL(n12 [0],1); + BOOST_CHECK_EQUAL(n12 [1],2); + + BOOST_CHECK_EQUAL(n21 [0],2); + BOOST_CHECK_EQUAL(n21 [1],1); + + BOOST_CHECK_EQUAL(n22 [0],2); + BOOST_CHECK_EQUAL(n22 [1],2); + + BOOST_CHECK_EQUAL(n32 [0],3); + BOOST_CHECK_EQUAL(n32 [1],2); + + BOOST_CHECK_EQUAL(n432[0],4); + BOOST_CHECK_EQUAL(n432[1],3); + BOOST_CHECK_EQUAL(n432[2],2); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_extents_functions.cpp b/test/tensor/test_extents_functions.cpp new file mode 100644 index 000000000..868afd26e --- /dev/null +++ b/test/tensor/test_extents_functions.cpp @@ -0,0 +1,634 @@ +// +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#include +#include + +#include +#include + + + +BOOST_AUTO_TEST_SUITE(test_shape_functions) + +struct fixture_extents_dynamic_rank +{ + using shape_t = boost::numeric::ublas::extents<>; + + static inline auto n = shape_t{}; + static inline auto n1 = shape_t{1}; + static inline auto n2 = shape_t{2}; + static inline auto n11 = shape_t{1,1}; + static inline auto n12 = shape_t{1,2}; + static inline auto n21 = shape_t{2,1}; + static inline auto n22 = shape_t{2,2}; + static inline auto n32 = shape_t{3,2}; + static inline auto n111 = shape_t{1,1,1}; + static inline auto n211 = shape_t{2,1,1}; + static inline auto n121 = shape_t{1,2,1}; + static inline auto n112 = shape_t{1,1,2}; + static inline auto n123 = shape_t{1,2,3}; + static inline auto n321 = shape_t{3,2,1}; + static inline auto n213 = shape_t{2,1,3}; + static inline auto n432 = shape_t{4,3,2}; +}; + 
+struct fixture_extents_static_rank +{ + template + using extents_static_rank = boost::numeric::ublas::extents; + + static constexpr inline auto n = extents_static_rank<0>{}; + static constexpr inline auto n1 = extents_static_rank<1>{1}; + static constexpr inline auto n2 = extents_static_rank<1>{2}; + static constexpr inline auto n11 = extents_static_rank<2>{{1,1}}; + static constexpr inline auto n12 = extents_static_rank<2>{{1,2}}; + static constexpr inline auto n21 = extents_static_rank<2>{{2,1}}; + static constexpr inline auto n22 = extents_static_rank<2>{{2,2}}; + static constexpr inline auto n32 = extents_static_rank<2>{{3,2}}; + static constexpr inline auto n111 = extents_static_rank<3>{{1,1,1}}; + static constexpr inline auto n211 = extents_static_rank<3>{{2,1,1}}; + static constexpr inline auto n121 = extents_static_rank<3>{{1,2,1}}; + static constexpr inline auto n112 = extents_static_rank<3>{{1,1,2}}; + static constexpr inline auto n123 = extents_static_rank<3>{{1,2,3}}; + static constexpr inline auto n321 = extents_static_rank<3>{{3,2,1}}; + static constexpr inline auto n213 = extents_static_rank<3>{{2,1,3}}; + static constexpr inline auto n432 = extents_static_rank<3>{{4,3,2}}; + + static constexpr inline auto tuple = std::make_tuple( n,n1,n2,n11,n12,n21,n22,n32,n111,n211,n121,n112,n123,n321,n213,n432 ); + +}; + + + +struct fixture_extents_static +{ + template + using extents_static = boost::numeric::ublas::extents; + + static inline auto n = extents_static<> {}; + static inline auto n1 = extents_static<1> {}; + static inline auto n2 = extents_static<2> {}; + static inline auto n11 = extents_static<1,1> {}; + static inline auto n12 = extents_static<1,2> {}; + static inline auto n21 = extents_static<2,1> {}; + static inline auto n22 = extents_static<2,2> {}; + static inline auto n32 = extents_static<3,2> {}; + static inline auto n111 = extents_static<1,1,1> {}; + static inline auto n211 = extents_static<2,1,1> {}; + static inline auto n121 = 
extents_static<1,2,1> {}; + static inline auto n112 = extents_static<1,1,2> {}; + static inline auto n123 = extents_static<1,2,3> {}; + static inline auto n321 = extents_static<3,2,1> {}; + static inline auto n213 = extents_static<2,1,3> {}; + static inline auto n432 = extents_static<4,3,2> {}; + + + +}; + + + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_scalar, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_scalar")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_scalar( n )); + BOOST_CHECK ( ub::is_scalar( n1 )); + BOOST_CHECK ( !ub::is_scalar( n2 )); + BOOST_CHECK ( ub::is_scalar( n11 )); + BOOST_CHECK ( !ub::is_scalar( n12 )); + BOOST_CHECK ( !ub::is_scalar( n21 )); + BOOST_CHECK ( !ub::is_scalar( n22 )); + BOOST_CHECK ( !ub::is_scalar( n32 )); + BOOST_CHECK ( ub::is_scalar( n111 )); + BOOST_CHECK ( !ub::is_scalar( n211 )); + BOOST_CHECK ( !ub::is_scalar( n121 )); + BOOST_CHECK ( !ub::is_scalar( n112 )); + BOOST_CHECK ( !ub::is_scalar( n123 )); + BOOST_CHECK ( !ub::is_scalar( n321 )); + BOOST_CHECK ( !ub::is_scalar( n213 )); + BOOST_CHECK ( !ub::is_scalar( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_scalar, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_scalar")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_scalar( n )); + BOOST_CHECK ( ub::is_scalar( n1 )); + BOOST_CHECK ( !ub::is_scalar( n2 )); + BOOST_CHECK ( ub::is_scalar( n11 )); + BOOST_CHECK ( !ub::is_scalar( n12 )); + BOOST_CHECK ( !ub::is_scalar( n21 )); + BOOST_CHECK ( !ub::is_scalar( n22 )); + BOOST_CHECK ( !ub::is_scalar( n32 )); + BOOST_CHECK ( ub::is_scalar( n111 )); + BOOST_CHECK ( !ub::is_scalar( n211 )); + BOOST_CHECK ( !ub::is_scalar( n121 )); + BOOST_CHECK ( !ub::is_scalar( n112 )); + BOOST_CHECK ( !ub::is_scalar( n123 )); + BOOST_CHECK ( !ub::is_scalar( n321 )); + BOOST_CHECK ( 
!ub::is_scalar( n213 )); + BOOST_CHECK ( !ub::is_scalar( n432 )); + +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_scalar, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_scalar")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_scalar( n )); +//FIXME: BOOST_CHECK ( ub::is_scalar( n1 )); + BOOST_CHECK ( !ub::is_scalar( n2 )); + BOOST_CHECK ( ub::is_scalar( n11 )); + BOOST_CHECK ( !ub::is_scalar( n12 )); + BOOST_CHECK ( !ub::is_scalar( n21 )); + BOOST_CHECK ( !ub::is_scalar( n22 )); + BOOST_CHECK ( !ub::is_scalar( n32 )); + BOOST_CHECK ( ub::is_scalar( n111 )); + BOOST_CHECK ( !ub::is_scalar( n211 )); + BOOST_CHECK ( !ub::is_scalar( n121 )); + BOOST_CHECK ( !ub::is_scalar( n112 )); + BOOST_CHECK ( !ub::is_scalar( n123 )); + BOOST_CHECK ( !ub::is_scalar( n321 )); + BOOST_CHECK ( !ub::is_scalar( n213 )); + BOOST_CHECK ( !ub::is_scalar( n432 )); + +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_vector, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_vector")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_vector( n )); + BOOST_CHECK ( ub::is_vector( n1 )); + BOOST_CHECK ( ub::is_vector( n2 )); + BOOST_CHECK ( ub::is_vector( n11 )); + BOOST_CHECK ( ub::is_vector( n12 )); + BOOST_CHECK ( ub::is_vector( n21 )); + BOOST_CHECK ( !ub::is_vector( n22 )); + BOOST_CHECK ( !ub::is_vector( n32 )); + BOOST_CHECK ( ub::is_vector( n111 )); + BOOST_CHECK ( ub::is_vector( n211 )); + BOOST_CHECK ( ub::is_vector( n121 )); + BOOST_CHECK ( !ub::is_vector( n112 )); + BOOST_CHECK ( !ub::is_vector( n123 )); + BOOST_CHECK ( !ub::is_vector( n321 )); + BOOST_CHECK ( !ub::is_vector( n213 )); + BOOST_CHECK ( !ub::is_vector( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_vector, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + 
*boost::unit_test::label("is_vector")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_vector( n )); + BOOST_CHECK ( ub::is_vector( n1 )); + BOOST_CHECK ( ub::is_vector( n2 )); + BOOST_CHECK ( ub::is_vector( n11 )); + BOOST_CHECK ( ub::is_vector( n12 )); + BOOST_CHECK ( ub::is_vector( n21 )); + BOOST_CHECK ( !ub::is_vector( n22 )); + BOOST_CHECK ( !ub::is_vector( n32 )); + BOOST_CHECK ( ub::is_vector( n111 )); + BOOST_CHECK ( ub::is_vector( n211 )); + BOOST_CHECK ( ub::is_vector( n121 )); + BOOST_CHECK ( !ub::is_vector( n112 )); + BOOST_CHECK ( !ub::is_vector( n123 )); + BOOST_CHECK ( !ub::is_vector( n321 )); + BOOST_CHECK ( !ub::is_vector( n213 )); + BOOST_CHECK ( !ub::is_vector( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_vector, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_vector")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_vector( n1 )); +//FIXME: BOOST_CHECK ( ub::is_vector( n2 )); + BOOST_CHECK ( ub::is_vector( n11 )); + BOOST_CHECK ( ub::is_vector( n12 )); + BOOST_CHECK ( ub::is_vector( n21 )); + BOOST_CHECK ( !ub::is_vector( n22 )); + BOOST_CHECK ( !ub::is_vector( n32 )); + BOOST_CHECK ( ub::is_vector( n111 )); + BOOST_CHECK ( ub::is_vector( n211 )); + BOOST_CHECK ( ub::is_vector( n121 )); + BOOST_CHECK ( !ub::is_vector( n112 )); + BOOST_CHECK ( !ub::is_vector( n123 )); + BOOST_CHECK ( !ub::is_vector( n321 )); + BOOST_CHECK ( !ub::is_vector( n213 )); + BOOST_CHECK ( !ub::is_vector( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_matrix, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_matrix")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_matrix( n )); + BOOST_CHECK ( ub::is_matrix( n1 )); + BOOST_CHECK ( ub::is_matrix( n2 )); + BOOST_CHECK ( ub::is_matrix( n11 )); + BOOST_CHECK ( ub::is_matrix( n12 )); + BOOST_CHECK ( 
ub::is_matrix( n21 )); + BOOST_CHECK ( ub::is_matrix( n22 )); + BOOST_CHECK ( ub::is_matrix( n32 )); + BOOST_CHECK ( ub::is_matrix( n111 )); + BOOST_CHECK ( ub::is_matrix( n211 )); + BOOST_CHECK ( ub::is_matrix( n121 )); + BOOST_CHECK ( !ub::is_matrix( n112 )); + BOOST_CHECK ( !ub::is_matrix( n123 )); + BOOST_CHECK ( ub::is_matrix( n321 )); + BOOST_CHECK ( !ub::is_matrix( n213 )); + BOOST_CHECK ( !ub::is_matrix( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_matrix, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_matrix")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_matrix( n )); + BOOST_CHECK ( ub::is_matrix( n1 )); + BOOST_CHECK ( ub::is_matrix( n2 )); + BOOST_CHECK ( ub::is_matrix( n11 )); + BOOST_CHECK ( ub::is_matrix( n12 )); + BOOST_CHECK ( ub::is_matrix( n21 )); + BOOST_CHECK ( ub::is_matrix( n22 )); + BOOST_CHECK ( ub::is_matrix( n32 )); + BOOST_CHECK ( ub::is_matrix( n111 )); + BOOST_CHECK ( ub::is_matrix( n211 )); + BOOST_CHECK ( ub::is_matrix( n121 )); + BOOST_CHECK ( !ub::is_matrix( n112 )); + BOOST_CHECK ( !ub::is_matrix( n123 )); + BOOST_CHECK ( ub::is_matrix( n321 )); + BOOST_CHECK ( !ub::is_matrix( n213 )); + BOOST_CHECK ( !ub::is_matrix( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_matrix, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_matrix")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_matrix( n )); +//FIXME: BOOST_CHECK ( !ub::is_matrix( n1 )); + BOOST_CHECK ( !ub::is_matrix( n2 )); + BOOST_CHECK ( ub::is_matrix( n11 )); + BOOST_CHECK ( ub::is_matrix( n12 )); + BOOST_CHECK ( ub::is_matrix( n21 )); + BOOST_CHECK ( ub::is_matrix( n22 )); + BOOST_CHECK ( ub::is_matrix( n32 )); + BOOST_CHECK ( ub::is_matrix( n111 )); + BOOST_CHECK ( ub::is_matrix( n211 )); + BOOST_CHECK ( ub::is_matrix( n121 )); + BOOST_CHECK ( !ub::is_matrix( n112 )); + 
BOOST_CHECK ( !ub::is_matrix( n123 )); + BOOST_CHECK ( ub::is_matrix( n321 )); + BOOST_CHECK ( !ub::is_matrix( n213 )); + BOOST_CHECK ( !ub::is_matrix( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_tensor, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_tensor")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_tensor( n )); + BOOST_CHECK ( !ub::is_tensor( n1 )); + BOOST_CHECK ( !ub::is_tensor( n2 )); + BOOST_CHECK ( !ub::is_tensor( n11 )); + BOOST_CHECK ( !ub::is_tensor( n12 )); + BOOST_CHECK ( !ub::is_tensor( n21 )); + BOOST_CHECK ( !ub::is_tensor( n22 )); + BOOST_CHECK ( !ub::is_tensor( n32 )); + BOOST_CHECK ( !ub::is_tensor( n111 )); + BOOST_CHECK ( !ub::is_tensor( n211 )); + BOOST_CHECK ( !ub::is_tensor( n121 )); + BOOST_CHECK ( ub::is_tensor( n112 )); + BOOST_CHECK ( ub::is_tensor( n123 )); + BOOST_CHECK ( !ub::is_tensor( n321 )); + BOOST_CHECK ( ub::is_tensor( n213 )); + BOOST_CHECK ( ub::is_tensor( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_tensor, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_tensor")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_tensor( n )); + BOOST_CHECK ( !ub::is_tensor( n1 )); + BOOST_CHECK ( !ub::is_tensor( n2 )); + BOOST_CHECK ( !ub::is_tensor( n11 )); + BOOST_CHECK ( !ub::is_tensor( n12 )); + BOOST_CHECK ( !ub::is_tensor( n21 )); + BOOST_CHECK ( !ub::is_tensor( n22 )); + BOOST_CHECK ( !ub::is_tensor( n32 )); + BOOST_CHECK ( !ub::is_tensor( n111 )); + BOOST_CHECK ( !ub::is_tensor( n211 )); + BOOST_CHECK ( !ub::is_tensor( n121 )); + BOOST_CHECK ( ub::is_tensor( n112 )); + BOOST_CHECK ( ub::is_tensor( n123 )); + BOOST_CHECK ( !ub::is_tensor( n321 )); + BOOST_CHECK ( ub::is_tensor( n213 )); + BOOST_CHECK ( ub::is_tensor( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_tensor, + 
fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_tensor")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( !ub::is_tensor( n )); +//FIXME: BOOST_CHECK ( !ub::is_tensor( n1 )); + BOOST_CHECK ( !ub::is_tensor( n2 )); + BOOST_CHECK ( !ub::is_tensor( n11 )); + BOOST_CHECK ( !ub::is_tensor( n12 )); + BOOST_CHECK ( !ub::is_tensor( n21 )); + BOOST_CHECK ( !ub::is_tensor( n22 )); + BOOST_CHECK ( !ub::is_tensor( n32 )); + BOOST_CHECK ( !ub::is_tensor( n111 )); + BOOST_CHECK ( !ub::is_tensor( n211 )); + BOOST_CHECK ( !ub::is_tensor( n121 )); + BOOST_CHECK ( ub::is_tensor( n112 )); + BOOST_CHECK ( ub::is_tensor( n123 )); + BOOST_CHECK ( !ub::is_tensor( n321 )); + BOOST_CHECK ( ub::is_tensor( n213 )); + BOOST_CHECK ( ub::is_tensor( n432 )); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_is_valid, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("is_valid")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( ub::is_valid( n1 )); + BOOST_CHECK ( ub::is_valid( n2 )); + BOOST_CHECK ( ub::is_valid( n11 )); + BOOST_CHECK ( ub::is_valid( n12 )); + BOOST_CHECK ( ub::is_valid( n21 )); + BOOST_CHECK ( ub::is_valid( n22 )); + BOOST_CHECK ( ub::is_valid( n32 )); + BOOST_CHECK ( ub::is_valid( n111 )); + BOOST_CHECK ( ub::is_valid( n211 )); + BOOST_CHECK ( ub::is_valid( n121 )); + BOOST_CHECK ( ub::is_valid( n112 )); + BOOST_CHECK ( ub::is_valid( n123 )); + BOOST_CHECK ( ub::is_valid( n321 )); + BOOST_CHECK ( ub::is_valid( n213 )); + BOOST_CHECK ( ub::is_valid( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_is_valid, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("is_valid")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( ub::is_valid( n )); + BOOST_CHECK ( ub::is_valid( n1 )); + BOOST_CHECK ( ub::is_valid( n2 )); + BOOST_CHECK ( ub::is_valid( n11 )); + 
BOOST_CHECK ( ub::is_valid( n12 )); + BOOST_CHECK ( ub::is_valid( n21 )); + BOOST_CHECK ( ub::is_valid( n22 )); + BOOST_CHECK ( ub::is_valid( n32 )); + BOOST_CHECK ( ub::is_valid( n111 )); + BOOST_CHECK ( ub::is_valid( n211 )); + BOOST_CHECK ( ub::is_valid( n121 )); + BOOST_CHECK ( ub::is_valid( n112 )); + BOOST_CHECK ( ub::is_valid( n123 )); + BOOST_CHECK ( ub::is_valid( n321 )); + BOOST_CHECK ( ub::is_valid( n213 )); + BOOST_CHECK ( ub::is_valid( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_is_valid, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("is_valid")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK ( ub::is_valid( n )); +//FIXME: BOOST_CHECK ( ub::is_valid( n1 )); +//FIXME: BOOST_CHECK ( ub::is_valid( n2 )); + BOOST_CHECK ( ub::is_valid( n11 )); + BOOST_CHECK ( ub::is_valid( n12 )); + BOOST_CHECK ( ub::is_valid( n21 )); + BOOST_CHECK ( ub::is_valid( n22 )); + BOOST_CHECK ( ub::is_valid( n32 )); + BOOST_CHECK ( ub::is_valid( n111 )); + BOOST_CHECK ( ub::is_valid( n211 )); + BOOST_CHECK ( ub::is_valid( n121 )); + BOOST_CHECK ( ub::is_valid( n112 )); + BOOST_CHECK ( ub::is_valid( n123 )); + BOOST_CHECK ( ub::is_valid( n321 )); + BOOST_CHECK ( ub::is_valid( n213 )); + BOOST_CHECK ( ub::is_valid( n432 )); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_product, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("product")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK_EQUAL ( ub::product( n ), 0U); + BOOST_CHECK_EQUAL ( ub::product( n1 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n2 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n11 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n12 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n21 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n22 ), 4U); + BOOST_CHECK_EQUAL ( ub::product( n32 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n111 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( 
n211 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n121 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n112 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n123 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n321 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n213 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n432 ),24U); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_product, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("product")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK_EQUAL ( ub::product( n ), 0U); + BOOST_CHECK_EQUAL ( ub::product( n1 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n2 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n11 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n12 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n21 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n22 ), 4U); + BOOST_CHECK_EQUAL ( ub::product( n32 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n111 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n211 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n121 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n112 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n123 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n321 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n213 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n432 ),24U); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_product, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("product")) +{ + + namespace ub = boost::numeric::ublas; + BOOST_CHECK_EQUAL ( ub::product( n ), 0U); +//FIXME: BOOST_CHECK_EQUAL ( ub::product( n1 ), 1U); +//FIXME: BOOST_CHECK_EQUAL ( ub::product( n2 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n11 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n12 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n21 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n22 ), 4U); + BOOST_CHECK_EQUAL ( ub::product( n32 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n111 ), 1U); + BOOST_CHECK_EQUAL ( ub::product( n211 ), 2U); + 
BOOST_CHECK_EQUAL ( ub::product( n121 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n112 ), 2U); + BOOST_CHECK_EQUAL ( ub::product( n123 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n321 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n213 ), 6U); + BOOST_CHECK_EQUAL ( ub::product( n432 ),24U); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_equal, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("equal")) +{ + BOOST_CHECK ( n == n ); + BOOST_CHECK ( n1 == n1 ); + BOOST_CHECK ( n2 == n2 ); + BOOST_CHECK ( n11 == n11 ); + BOOST_CHECK ( n12 == n12 ); + BOOST_CHECK ( n21 == n21 ); + BOOST_CHECK ( n22 == n22 ); + BOOST_CHECK ( n32 == n32 ); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_rank_equal, + fixture_extents_static_rank, + *boost::unit_test::label("extents_static_rank") + *boost::unit_test::label("equal")) +{ + BOOST_CHECK ( n == n ); + BOOST_CHECK ( n1 == n1 ); + BOOST_CHECK ( n2 == n2 ); + BOOST_CHECK ( n11 == n11 ); + BOOST_CHECK ( n12 == n12 ); + BOOST_CHECK ( n21 == n21 ); + BOOST_CHECK ( n22 == n22 ); + BOOST_CHECK ( n32 == n32 ); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_equal, + fixture_extents_static, + *boost::unit_test::label("extents_static") + *boost::unit_test::label("equal")) +{ + BOOST_CHECK ( n == n ); + BOOST_CHECK ( n1 == n1 ); + BOOST_CHECK ( n2 == n2 ); + BOOST_CHECK ( n11 == n11 ); + BOOST_CHECK ( n12 == n12 ); + BOOST_CHECK ( n21 == n21 ); + BOOST_CHECK ( n22 == n22 ); + BOOST_CHECK ( n32 == n32 ); +} + + +BOOST_FIXTURE_TEST_CASE(test_extents_dynamic_rank_not_equal, + fixture_extents_dynamic_rank, + *boost::unit_test::label("extents_dynamic_rank") + *boost::unit_test::label("not_equal")) +{ + BOOST_CHECK ( ! (n != n ) ); + BOOST_CHECK ( ! (n1 != n1) ); + BOOST_CHECK ( ! (n2 != n2) ); + BOOST_CHECK ( ! (n11 != n11) ); + BOOST_CHECK ( ! (n12 != n12) ); + BOOST_CHECK ( ! (n21 != n21) ); + BOOST_CHECK ( ! (n22 != n22) ); + BOOST_CHECK ( ! 
(n32 != n32) ); + BOOST_CHECK ( (n2 != n1) ); + BOOST_CHECK ( (n11 != n12) ); + BOOST_CHECK ( (n12 != n21) ); + BOOST_CHECK ( (n21 != n22) ); + BOOST_CHECK ( (n22 != n32) ); +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_fixed_rank_expression_evaluation.cpp b/test/tensor/test_fixed_rank_expression_evaluation.cpp index 97aba6aec..a1e06885b 100644 --- a/test/tensor/test_fixed_rank_expression_evaluation.cpp +++ b/test/tensor/test_fixed_rank_expression_evaluation.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,150 +12,150 @@ -#include -#include "utility.hpp" +#include +#include #include +#include "utility.hpp" + +#include #include +#include -BOOST_AUTO_TEST_SUITE(test_fixed_rank_tensor_expression); +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_expression) using test_types = zip>::with_t; + + struct fixture { - template - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<0>, // 0 - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<2>, // 3 - extents_type<2>, // 4 - extents_type<3>, // 5 - extents_type<3>, // 6 - extents_type<3>, // 7 - extents_type<3>, // 8 - extents_type<4> // 9 - > extents = { - extents_type<0>{}, - extents_type<2>{1,1}, - extents_type<2>{1,2}, - extents_type<2>{2,1}, - extents_type<2>{2,3}, - extents_type<3>{2,3,1}, - extents_type<3>{4,1,3}, - extents_type<3>{1,2,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + template + using extents_t = boost::numeric::ublas::extents; + + static constexpr auto extents = + std::make_tuple( +// extents_t<0> {}, + extents_t<2> {1,1}, + extents_t<2> {1,2}, + extents_t<2> {2,1}, + extents_t<2> {2,3}, + 
extents_t<3> {2,3,1}, + extents_t<3> {4,1,3}, + extents_t<3> {1,2,3}, + extents_t<3> {4,2,3}, + extents_t<4>{4,2,3,5} ); }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_expression_retrieve_extents, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_extents, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto & e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } - BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; - // uexpr1 = t+1 - // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } - // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = 
ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); + BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); + // uexpr1 = t+1 + // uexpr2 = 2+t + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); + // bexpr_uexpr = (t+1) + (2+t) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - }); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); + + + // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - for_each_tuple(extents, [&](auto I, auto& e1){ + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); - if ( I >= std::tuple_size_v - 1 ){ - return; - } - - using extents_type1 = std::decay_t; - using tensor_type1 = ublas::fixed_rank_tensor; + }); - for_each_tuple(extents, [&](auto J, auto& e2){ + for_each_in_tuple(extents, [&](auto I, auto const& e1){ - if( J != I + 1 ){ - return; - } - using extents_type2 = std::decay_t; - using tensor_type2 = ublas::fixed_rank_tensor; + if ( I >= std::tuple_size_v - 1 ){ + return; + } - auto v = value_type{}; + constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; - tensor_type1 t1(e1); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + for_each_in_tuple(extents, [&,I](auto J, auto const& e2){ - tensor_type2 t2(e2); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + if( J != I + 1 ){ + return; + } - 
BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); + static constexpr auto size1 = std::tuple_size_v>; + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; - // uexpr1 = t1+1 - // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + auto v = value_t{}; - BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - if constexpr( extents_type1::_size == extents_type2::_size ){ - // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); + BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); + // uexpr1 = t1+1 + // uexpr2 = 2+t2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( 
bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+t2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); - // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); - } + // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); + + + // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); + } - }); }); + }); } @@ -164,122 +164,123 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_expression_retrieve_ext -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_expression_all_extents_equal, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents_equal, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); - auto bplus = std::plus {}; - auto bminus = std::minus{}; + auto uplus1 = [](auto 
const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; - auto t = tensor_type(e); - auto v = value_type{}; - for(auto& tt: t){ tt = v; v+=value_type{1}; } + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } - BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); - // uexpr1 = t+1 - // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); + // uexpr1 = t+1 + // uexpr2 = 2+t + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); - // bexpr_uexpr = (t+1) + (2+t) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); + // bexpr_uexpr = (t+1) + (2+t) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); - // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) 
); + // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); - }); + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); + + }); + + + for_each_in_tuple(extents, [&](auto I, auto& e1){ + if ( I >= std::tuple_size_v - 1){ + return; + } - for_each_tuple(extents, [&](auto I, auto& e1){ + static constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; - if ( I >= std::tuple_size_v - 1){ - return; - } - - using extents_type1 = std::decay_t; - using tensor_type1 = ublas::fixed_rank_tensor; + for_each_in_tuple(extents, [&](auto J, auto& e2){ - for_each_tuple(extents, [&](auto J, auto& e2){ + if( J != I + 1 ){ + return; + } - if( J != I + 1 ){ - return; - } - using extents_type2 = std::decay_t; - using tensor_type2 = ublas::fixed_rank_tensor; + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; - auto v = value_type{}; + auto v = value_t{}; - tensor_type1 t1(e1); - for(auto& tt: t1){ tt = v; v+=value_type{1}; } + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - tensor_type2 t2(e2); - for(auto& tt: t2){ tt = v; v+=value_type{2}; } + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } - BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); - // uexpr1 = t1+1 - // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + // uexpr1 = t1+1 + // uexpr2 = 2+t2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + 
auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); - if constexpr( extents_type1::_size == extents_type2::_size ){ - // bexpr_uexpr = (t1+1) + (2+t2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+t2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); - // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); - BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = (t1+1) + t2 - auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); + // bexpr_uexpr2 = (t1+1) + t2 + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = ((t1+1) + t2) + t1 - auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); + // bexpr_uexpr2 = ((t1+1) + t2) + t1 + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); - // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) - auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); - } + // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); + } - }); }); + }); } diff --git a/test/tensor/test_fixed_rank_extents.cpp b/test/tensor/test_fixed_rank_extents.cpp index 5d55d6b01..ac873f55c 100644 --- a/test/tensor/test_fixed_rank_extents.cpp +++ b/test/tensor/test_fixed_rank_extents.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -11,544 +11,543 @@ // #include -#include // Needed for squeeze -#include +#include #include -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_extents ) +BOOST_AUTO_TEST_SUITE ( test_extents_static_size ) //*boost::unit_test::label("extents") //*boost::unit_test::label("constructor") -BOOST_AUTO_TEST_CASE(test_fixed_rank_extents_ctor) +BOOST_AUTO_TEST_CASE(test_extents_static_size_ctor) { - namespace ub = boost::numeric::ublas; - - - auto e0 = ub::extents<0>{}; - BOOST_CHECK( e0.empty()); - BOOST_CHECK ( e0.size() == 0); - - auto e1 = ub::extents<2>{1,1}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK ( e1.size() == 2); - - auto e2 = ub::extents<2>{1,2}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK ( e2.size() == 2); - - auto e3 = ub::extents<2>{2,1}; - BOOST_CHECK (!e3.empty()); - BOOST_CHECK ( e3.size() == 2); - - auto e4 = ub::extents<2>{2,3}; - BOOST_CHECK(!e4.empty()); - BOOST_CHECK ( e4.size() == 2); - - auto e5 = ub::extents<3>{2,3,1}; - BOOST_CHECK (!e5.empty()); - BOOST_CHECK ( e5.size() == 3); - - auto e6 = ub::extents<3>{1,2,3}; // 6 - BOOST_CHECK(!e6.empty()); - BOOST_CHECK ( e6.size() == 3); - - auto e7 = ub::extents<3>{4,2,3}; // 7 - BOOST_CHECK(!e7.empty()); - BOOST_CHECK ( e7.size() == 3); - - BOOST_CHECK_THROW( ub::extents<2>({1,0}), 
std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({0} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<1>({3} ), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::length_error); - BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::out_of_range); + namespace ub = boost::numeric::ublas; + + +// auto e = ub::extents<0>{}; + auto e11 = ub::extents<2>{1,1}; + auto e12 = ub::extents<2>{1,2}; + auto e21 = ub::extents<2>{2,1}; + auto e23 = ub::extents<2>{2,3}; + auto e231 = ub::extents<3>{2,3,1}; + auto e123 = ub::extents<3>{1,2,3}; // 6 + auto e423 = ub::extents<3>{4,2,3}; // 7 + + + BOOST_CHECK (!ub::empty(e11)); + BOOST_CHECK (!ub::empty(e12)); + BOOST_CHECK (!ub::empty(e21)); + BOOST_CHECK (!ub::empty(e23)); + BOOST_CHECK (!ub::empty(e231)); + BOOST_CHECK (!ub::empty(e123)); + BOOST_CHECK (!ub::empty(e423)); + + BOOST_CHECK ( ub::size (e11) == 2); + BOOST_CHECK ( ub::size (e12) == 2); + BOOST_CHECK ( ub::size (e21) == 2); + BOOST_CHECK ( ub::size (e23) == 2); + BOOST_CHECK ( ub::size(e231) == 3); + BOOST_CHECK ( ub::size(e123) == 3); + BOOST_CHECK ( ub::size(e423) == 3); + + + BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<1>({0} ), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::length_error); } struct fixture { - template - using extents = boost::numeric::ublas::extents; - - extents<0> de0{}; // 0 - - extents<2> de1{1,1}; // 1 - extents<2> de2{1,2}; // 2 - extents<2> de3{2,1}; // 3 - - extents<2> de4{2,3}; // 4 - extents<3> de5{2,3,1}; // 5 - extents<3> de6{1,2,3}; // 6 - extents<4> de7{1,1,2,3}; // 7 - extents<5> de8{1,2,3,1,1}; // 8 - - extents<3> de9{4,2,3}; // 9 - extents<4> de10{4,2,1,3}; // 10 - extents<5> de11{4,2,1,3,1}; // 11 - extents<6> de12{1,4,2,1,3,1};// 12 - - extents<3> de13{1,4,1}; // 13 - extents<4> de14{1,1,1,1}; // 14 - extents<5> de15{1,4,1,1,1}; // 15 - extents<6> 
de16{1,1,2,1,1,1};// 16 - extents<6> de17{1,1,2,3,1,1};// 17 + template + using extents = boost::numeric::ublas::extents; + +// extents<0> de {}; + + extents<2> de11 {1,1}; + extents<2> de12 {1,2}; + extents<2> de21 {2,1}; + + extents<2> de23 {2,3}; + extents<3> de231 {2,3,1}; + extents<3> de123 {1,2,3}; + extents<4> de1123 {1,1,2,3}; + extents<5> de12311 {1,2,3,1,1}; + + extents<3> de423 {4,2,3}; + extents<4> de4213 {4,2,1,3}; + extents<5> de42131 {4,2,1,3,1}; + extents<6> de142131 {1,4,2,1,3,1}; + + extents<3> de141 {1,4,1}; + extents<4> de1111 {1,1,1,1}; + extents<5> de14111 {1,4,1,1,1}; + extents<6> de112111 {1,1,2,1,1,1}; + extents<6> de112311 {1,1,2,3,1,1}; }; -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_access, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("access")) -{ - using namespace boost::numeric; - - BOOST_CHECK_EQUAL(de0.size(), 0); - BOOST_CHECK (de0.empty() ); - - BOOST_REQUIRE_EQUAL(de1.size(), 2); - BOOST_REQUIRE_EQUAL(de2.size(), 2); - BOOST_REQUIRE_EQUAL(de3.size(), 2); - BOOST_REQUIRE_EQUAL(de4.size(), 2); - BOOST_REQUIRE_EQUAL(de5.size(), 3); - BOOST_REQUIRE_EQUAL(de6.size(), 3); - BOOST_REQUIRE_EQUAL(de7.size(), 4); - BOOST_REQUIRE_EQUAL(de8.size(), 5); - BOOST_REQUIRE_EQUAL(de9.size(), 3); - BOOST_REQUIRE_EQUAL(de10.size(), 4); - BOOST_REQUIRE_EQUAL(de11.size(), 5); - BOOST_REQUIRE_EQUAL(de12.size(), 6); - BOOST_REQUIRE_EQUAL(de13.size(), 3); - BOOST_REQUIRE_EQUAL(de14.size(), 4); - BOOST_REQUIRE_EQUAL(de15.size(), 5); - BOOST_REQUIRE_EQUAL(de16.size(), 6); - BOOST_REQUIRE_EQUAL(de17.size(), 6); - - - BOOST_CHECK_EQUAL(de1[0],1); - BOOST_CHECK_EQUAL(de1[1],1); - - BOOST_CHECK_EQUAL(de2[0],1); - BOOST_CHECK_EQUAL(de2[1],2); - - BOOST_CHECK_EQUAL(de3[0],2); - BOOST_CHECK_EQUAL(de3[1],1); - - BOOST_CHECK_EQUAL(de4[0],2); - BOOST_CHECK_EQUAL(de4[1],3); - - BOOST_CHECK_EQUAL(de5[0],2); - BOOST_CHECK_EQUAL(de5[1],3); - BOOST_CHECK_EQUAL(de5[2],1); - - BOOST_CHECK_EQUAL(de6[0],1); - 
BOOST_CHECK_EQUAL(de6[1],2); - BOOST_CHECK_EQUAL(de6[2],3); - - BOOST_CHECK_EQUAL(de7[0],1); - BOOST_CHECK_EQUAL(de7[1],1); - BOOST_CHECK_EQUAL(de7[2],2); - BOOST_CHECK_EQUAL(de7[3],3); - - BOOST_CHECK_EQUAL(de8[0],1); - BOOST_CHECK_EQUAL(de8[1],2); - BOOST_CHECK_EQUAL(de8[2],3); - BOOST_CHECK_EQUAL(de8[3],1); - BOOST_CHECK_EQUAL(de8[4],1); - - BOOST_CHECK_EQUAL(de9[0],4); - BOOST_CHECK_EQUAL(de9[1],2); - BOOST_CHECK_EQUAL(de9[2],3); - - BOOST_CHECK_EQUAL(de10[0],4); - BOOST_CHECK_EQUAL(de10[1],2); - BOOST_CHECK_EQUAL(de10[2],1); - BOOST_CHECK_EQUAL(de10[3],3); - - BOOST_CHECK_EQUAL(de11[0],4); - BOOST_CHECK_EQUAL(de11[1],2); - BOOST_CHECK_EQUAL(de11[2],1); - BOOST_CHECK_EQUAL(de11[3],3); - BOOST_CHECK_EQUAL(de11[4],1); - - BOOST_CHECK_EQUAL(de12[0],1); - BOOST_CHECK_EQUAL(de12[1],4); - BOOST_CHECK_EQUAL(de12[2],2); - BOOST_CHECK_EQUAL(de12[3],1); - BOOST_CHECK_EQUAL(de12[4],3); - BOOST_CHECK_EQUAL(de12[5],1); - - BOOST_CHECK_EQUAL(de13[0],1); - BOOST_CHECK_EQUAL(de13[1],4); - BOOST_CHECK_EQUAL(de13[2],1); - - BOOST_CHECK_EQUAL(de14[0],1); - BOOST_CHECK_EQUAL(de14[1],1); - BOOST_CHECK_EQUAL(de14[2],1); - BOOST_CHECK_EQUAL(de14[3],1); - - BOOST_CHECK_EQUAL(de15[0],1); - BOOST_CHECK_EQUAL(de15[1],4); - BOOST_CHECK_EQUAL(de15[2],1); - BOOST_CHECK_EQUAL(de15[3],1); - BOOST_CHECK_EQUAL(de15[4],1); - - BOOST_CHECK_EQUAL(de16[0],1); - BOOST_CHECK_EQUAL(de16[1],1); - BOOST_CHECK_EQUAL(de16[2],2); - BOOST_CHECK_EQUAL(de16[3],1); - BOOST_CHECK_EQUAL(de16[4],1); - BOOST_CHECK_EQUAL(de16[5],1); - - BOOST_CHECK_EQUAL(de17[0],1); - BOOST_CHECK_EQUAL(de17[1],1); - BOOST_CHECK_EQUAL(de17[2],2); - BOOST_CHECK_EQUAL(de17[3],3); - BOOST_CHECK_EQUAL(de17[4],1); - BOOST_CHECK_EQUAL(de17[5],1); -} - -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_copy_ctor, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("copy_ctor")) +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_access, fixture, *boost::unit_test::label("basic_fixed_rank_extents") 
*boost::unit_test::label("access")) { - auto e0 = de0; // {} - auto e1 = de1; // {1,1} - auto e2 = de2; // {1,2} - auto e3 = de3; // {2,1} - auto e4 = de4; // {2,3} - auto e5 = de5; // {2,3,1} - auto e6 = de6; // {1,2,3} - auto e7 = de7; // {1,1,2,3} - auto e8 = de8; // {1,2,3,1,1} - auto e9 = de9; // {4,2,3} - auto e10 = de10; // {4,2,1,3} - auto e11 = de11; // {4,2,1,3,1} - auto e12 = de12; // {1,4,2,1,3,1} - auto e13 = de13; // {1,4,1} - auto e14 = de14; // {1,1,1,1} - auto e15 = de15; // {1,4,1,1,1} - auto e16 = de16; // {1,1,2,1,1,1} - auto e17 = de17; // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL (e0.size(), 0); - BOOST_CHECK (e0.empty() ); - - BOOST_REQUIRE_EQUAL(e1 .size(), 2); - BOOST_REQUIRE_EQUAL(e2 .size(), 2); - BOOST_REQUIRE_EQUAL(e3 .size(), 2); - BOOST_REQUIRE_EQUAL(e4 .size(), 2); - BOOST_REQUIRE_EQUAL(e5 .size(), 3); - BOOST_REQUIRE_EQUAL(e6 .size(), 3); - BOOST_REQUIRE_EQUAL(e7 .size(), 4); - BOOST_REQUIRE_EQUAL(e8 .size(), 5); - BOOST_REQUIRE_EQUAL(e9 .size(), 3); - BOOST_REQUIRE_EQUAL(e10.size(), 4); - BOOST_REQUIRE_EQUAL(e11.size(), 5); - BOOST_REQUIRE_EQUAL(e12.size(), 6); - BOOST_REQUIRE_EQUAL(e13.size(), 3); - BOOST_REQUIRE_EQUAL(e14.size(), 4); - BOOST_REQUIRE_EQUAL(e15.size(), 5); - BOOST_REQUIRE_EQUAL(e16.size(), 6); - BOOST_REQUIRE_EQUAL(e17.size(), 6); - - - BOOST_CHECK_EQUAL(e1[0],1); - BOOST_CHECK_EQUAL(e1[1],1); - - BOOST_CHECK_EQUAL(e2[0],1); - BOOST_CHECK_EQUAL(e2[1],2); - - BOOST_CHECK_EQUAL(e3[0],2); - BOOST_CHECK_EQUAL(e3[1],1); - - BOOST_CHECK_EQUAL(e4[0],2); - BOOST_CHECK_EQUAL(e4[1],3); - - BOOST_CHECK_EQUAL(e5[0],2); - BOOST_CHECK_EQUAL(e5[1],3); - BOOST_CHECK_EQUAL(e5[2],1); - - BOOST_CHECK_EQUAL(e6[0],1); - BOOST_CHECK_EQUAL(e6[1],2); - BOOST_CHECK_EQUAL(e6[2],3); - - BOOST_CHECK_EQUAL(e7[0],1); - BOOST_CHECK_EQUAL(e7[1],1); - BOOST_CHECK_EQUAL(e7[2],2); - BOOST_CHECK_EQUAL(e7[3],3); - - BOOST_CHECK_EQUAL(e8[0],1); - BOOST_CHECK_EQUAL(e8[1],2); - BOOST_CHECK_EQUAL(e8[2],3); - BOOST_CHECK_EQUAL(e8[3],1); - 
BOOST_CHECK_EQUAL(e8[4],1); - - BOOST_CHECK_EQUAL(e9[0],4); - BOOST_CHECK_EQUAL(e9[1],2); - BOOST_CHECK_EQUAL(e9[2],3); - - BOOST_CHECK_EQUAL(e10[0],4); - BOOST_CHECK_EQUAL(e10[1],2); - BOOST_CHECK_EQUAL(e10[2],1); - BOOST_CHECK_EQUAL(e10[3],3); - - BOOST_CHECK_EQUAL(e11[0],4); - BOOST_CHECK_EQUAL(e11[1],2); - BOOST_CHECK_EQUAL(e11[2],1); - BOOST_CHECK_EQUAL(e11[3],3); - BOOST_CHECK_EQUAL(e11[4],1); - - BOOST_CHECK_EQUAL(e12[0],1); - BOOST_CHECK_EQUAL(e12[1],4); - BOOST_CHECK_EQUAL(e12[2],2); - BOOST_CHECK_EQUAL(e12[3],1); - BOOST_CHECK_EQUAL(e12[4],3); - BOOST_CHECK_EQUAL(e12[5],1); - - BOOST_CHECK_EQUAL(e13[0],1); - BOOST_CHECK_EQUAL(e13[1],4); - BOOST_CHECK_EQUAL(e13[2],1); - - BOOST_CHECK_EQUAL(e14[0],1); - BOOST_CHECK_EQUAL(e14[1],1); - BOOST_CHECK_EQUAL(e14[2],1); - BOOST_CHECK_EQUAL(e14[3],1); - - BOOST_CHECK_EQUAL(e15[0],1); - BOOST_CHECK_EQUAL(e15[1],4); - BOOST_CHECK_EQUAL(e15[2],1); - BOOST_CHECK_EQUAL(e15[3],1); - BOOST_CHECK_EQUAL(e15[4],1); - - BOOST_CHECK_EQUAL(e16[0],1); - BOOST_CHECK_EQUAL(e16[1],1); - BOOST_CHECK_EQUAL(e16[2],2); - BOOST_CHECK_EQUAL(e16[3],1); - BOOST_CHECK_EQUAL(e16[4],1); - BOOST_CHECK_EQUAL(e16[5],1); - - BOOST_CHECK_EQUAL(e17[0],1); - BOOST_CHECK_EQUAL(e17[1],1); - BOOST_CHECK_EQUAL(e17[2],2); - BOOST_CHECK_EQUAL(e17[3],3); - BOOST_CHECK_EQUAL(e17[4],1); - BOOST_CHECK_EQUAL(e17[5],1); - + namespace ublas = boost::numeric::ublas; + +// BOOST_REQUIRE_EQUAL(ublas::size(de), 0); +// BOOST_CHECK (ublas::empty(de) ); + + BOOST_REQUIRE_EQUAL(ublas::size(de11) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de12) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de21) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de23) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de231) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de123) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de1123) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de12311) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de423) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de4213) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de42131) , 5); + 
BOOST_REQUIRE_EQUAL(ublas::size(de142131), 6); + BOOST_REQUIRE_EQUAL(ublas::size(de141) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de1111) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de14111) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de112111), 6); + BOOST_REQUIRE_EQUAL(ublas::size(de112311), 6); + + + BOOST_CHECK_EQUAL(de11[0],1); + BOOST_CHECK_EQUAL(de11[1],1); + + BOOST_CHECK_EQUAL(de12[0],1); + BOOST_CHECK_EQUAL(de12[1],2); + + BOOST_CHECK_EQUAL(de21[0],2); + BOOST_CHECK_EQUAL(de21[1],1); + + BOOST_CHECK_EQUAL(de23[0],2); + BOOST_CHECK_EQUAL(de23[1],3); + + BOOST_CHECK_EQUAL(de231[0],2); + BOOST_CHECK_EQUAL(de231[1],3); + BOOST_CHECK_EQUAL(de231[2],1); + + BOOST_CHECK_EQUAL(de123[0],1); + BOOST_CHECK_EQUAL(de123[1],2); + BOOST_CHECK_EQUAL(de123[2],3); + + BOOST_CHECK_EQUAL(de1123[0],1); + BOOST_CHECK_EQUAL(de1123[1],1); + BOOST_CHECK_EQUAL(de1123[2],2); + BOOST_CHECK_EQUAL(de1123[3],3); + + BOOST_CHECK_EQUAL(de12311[0],1); + BOOST_CHECK_EQUAL(de12311[1],2); + BOOST_CHECK_EQUAL(de12311[2],3); + BOOST_CHECK_EQUAL(de12311[3],1); + BOOST_CHECK_EQUAL(de12311[4],1); + + BOOST_CHECK_EQUAL(de423[0],4); + BOOST_CHECK_EQUAL(de423[1],2); + BOOST_CHECK_EQUAL(de423[2],3); + + BOOST_CHECK_EQUAL(de4213[0],4); + BOOST_CHECK_EQUAL(de4213[1],2); + BOOST_CHECK_EQUAL(de4213[2],1); + BOOST_CHECK_EQUAL(de4213[3],3); + + BOOST_CHECK_EQUAL(de42131[0],4); + BOOST_CHECK_EQUAL(de42131[1],2); + BOOST_CHECK_EQUAL(de42131[2],1); + BOOST_CHECK_EQUAL(de42131[3],3); + BOOST_CHECK_EQUAL(de42131[4],1); + + BOOST_CHECK_EQUAL(de142131[0],1); + BOOST_CHECK_EQUAL(de142131[1],4); + BOOST_CHECK_EQUAL(de142131[2],2); + BOOST_CHECK_EQUAL(de142131[3],1); + BOOST_CHECK_EQUAL(de142131[4],3); + BOOST_CHECK_EQUAL(de142131[5],1); + + BOOST_CHECK_EQUAL(de141[0],1); + BOOST_CHECK_EQUAL(de141[1],4); + BOOST_CHECK_EQUAL(de141[2],1); + + BOOST_CHECK_EQUAL(de1111[0],1); + BOOST_CHECK_EQUAL(de1111[1],1); + BOOST_CHECK_EQUAL(de1111[2],1); + BOOST_CHECK_EQUAL(de1111[3],1); + + BOOST_CHECK_EQUAL(de14111[0],1); + 
BOOST_CHECK_EQUAL(de14111[1],4); + BOOST_CHECK_EQUAL(de14111[2],1); + BOOST_CHECK_EQUAL(de14111[3],1); + BOOST_CHECK_EQUAL(de14111[4],1); + + BOOST_CHECK_EQUAL(de112111[0],1); + BOOST_CHECK_EQUAL(de112111[1],1); + BOOST_CHECK_EQUAL(de112111[2],2); + BOOST_CHECK_EQUAL(de112111[3],1); + BOOST_CHECK_EQUAL(de112111[4],1); + BOOST_CHECK_EQUAL(de112111[5],1); + + BOOST_CHECK_EQUAL(de112311[0],1); + BOOST_CHECK_EQUAL(de112311[1],1); + BOOST_CHECK_EQUAL(de112311[2],2); + BOOST_CHECK_EQUAL(de112311[3],3); + BOOST_CHECK_EQUAL(de112311[4],1); + BOOST_CHECK_EQUAL(de112311[5],1); } -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_is, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("query")) +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_copy_ctor, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("copy_ctor")) { + namespace ublas = boost::numeric::ublas; + +// auto e = de; + auto e1 = de11; + auto e12 = de12; + auto e21 = de21; + auto e23 = de23; + auto e231 = de231; + auto e123 = de123; + auto e1123 = de1123; + auto e12311 = de12311; + auto e423 = de423; + auto e4213 = de4213; + auto e42131 = de42131; + auto e142131 = de142131; + auto e141 = de141; + auto e1111 = de1111; + auto e14111 = de14111; + auto e112111 = de112111; + auto e112311 = de112311; + + +// BOOST_CHECK (ublas::empty(e) ); + +// BOOST_REQUIRE_EQUAL(ublas::size(e) , 0); + BOOST_REQUIRE_EQUAL(ublas::size(e1) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e12) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e21) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e23) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e231), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e123), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e1123), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e12311), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e423), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e4213), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e42131), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e142131), 6); + 
BOOST_REQUIRE_EQUAL(ublas::size(e141), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e1111), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e14111), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e112111), 6); + BOOST_REQUIRE_EQUAL(ublas::size(e112311), 6); + + + BOOST_CHECK_EQUAL(e1[0],1); + BOOST_CHECK_EQUAL(e1[1],1); + + BOOST_CHECK_EQUAL(e12[0],1); + BOOST_CHECK_EQUAL(e12[1],2); + + BOOST_CHECK_EQUAL(e21[0],2); + BOOST_CHECK_EQUAL(e21[1],1); + + BOOST_CHECK_EQUAL(e23[0],2); + BOOST_CHECK_EQUAL(e23[1],3); + + BOOST_CHECK_EQUAL(e231[0],2); + BOOST_CHECK_EQUAL(e231[1],3); + BOOST_CHECK_EQUAL(e231[2],1); + + BOOST_CHECK_EQUAL(e123[0],1); + BOOST_CHECK_EQUAL(e123[1],2); + BOOST_CHECK_EQUAL(e123[2],3); + + BOOST_CHECK_EQUAL(e1123[0],1); + BOOST_CHECK_EQUAL(e1123[1],1); + BOOST_CHECK_EQUAL(e1123[2],2); + BOOST_CHECK_EQUAL(e1123[3],3); + + BOOST_CHECK_EQUAL(e12311[0],1); + BOOST_CHECK_EQUAL(e12311[1],2); + BOOST_CHECK_EQUAL(e12311[2],3); + BOOST_CHECK_EQUAL(e12311[3],1); + BOOST_CHECK_EQUAL(e12311[4],1); + + BOOST_CHECK_EQUAL(e423[0],4); + BOOST_CHECK_EQUAL(e423[1],2); + BOOST_CHECK_EQUAL(e423[2],3); + + BOOST_CHECK_EQUAL(e4213[0],4); + BOOST_CHECK_EQUAL(e4213[1],2); + BOOST_CHECK_EQUAL(e4213[2],1); + BOOST_CHECK_EQUAL(e4213[3],3); + + BOOST_CHECK_EQUAL(e42131[0],4); + BOOST_CHECK_EQUAL(e42131[1],2); + BOOST_CHECK_EQUAL(e42131[2],1); + BOOST_CHECK_EQUAL(e42131[3],3); + BOOST_CHECK_EQUAL(e42131[4],1); + + BOOST_CHECK_EQUAL(e142131[0],1); + BOOST_CHECK_EQUAL(e142131[1],4); + BOOST_CHECK_EQUAL(e142131[2],2); + BOOST_CHECK_EQUAL(e142131[3],1); + BOOST_CHECK_EQUAL(e142131[4],3); + BOOST_CHECK_EQUAL(e142131[5],1); + + BOOST_CHECK_EQUAL(e141[0],1); + BOOST_CHECK_EQUAL(e141[1],4); + BOOST_CHECK_EQUAL(e141[2],1); + + BOOST_CHECK_EQUAL(e1111[0],1); + BOOST_CHECK_EQUAL(e1111[1],1); + BOOST_CHECK_EQUAL(e1111[2],1); + BOOST_CHECK_EQUAL(e1111[3],1); + + BOOST_CHECK_EQUAL(e14111[0],1); + BOOST_CHECK_EQUAL(e14111[1],4); + BOOST_CHECK_EQUAL(e14111[2],1); + BOOST_CHECK_EQUAL(e14111[3],1); + 
BOOST_CHECK_EQUAL(e14111[4],1); + + BOOST_CHECK_EQUAL(e112111[0],1); + BOOST_CHECK_EQUAL(e112111[1],1); + BOOST_CHECK_EQUAL(e112111[2],2); + BOOST_CHECK_EQUAL(e112111[3],1); + BOOST_CHECK_EQUAL(e112111[4],1); + BOOST_CHECK_EQUAL(e112111[5],1); + + BOOST_CHECK_EQUAL(e112311[0],1); + BOOST_CHECK_EQUAL(e112311[1],1); + BOOST_CHECK_EQUAL(e112311[2],2); + BOOST_CHECK_EQUAL(e112311[3],3); + BOOST_CHECK_EQUAL(e112311[4],1); + BOOST_CHECK_EQUAL(e112311[5],1); - auto e0 = de0; // {} - auto e1 = de1; // {1,1} - auto e2 = de2; // {1,2} - auto e3 = de3; // {2,1} - auto e4 = de4; // {2,3} - auto e5 = de5; // {2,3,1} - auto e6 = de6; // {1,2,3} - auto e7 = de7; // {1,1,2,3} - auto e8 = de8; // {1,2,3,1,1} - auto e9 = de9; // {4,2,3} - auto e10 = de10; // {4,2,1,3} - auto e11 = de11; // {4,2,1,3,1} - auto e12 = de12; // {1,4,2,1,3,1} - auto e13 = de13; // {1,4,1} - auto e14 = de14; // {1,1,1,1} - auto e15 = de15; // {1,4,1,1,1} - auto e16 = de16; // {1,1,2,1,1,1} - auto e17 = de17; // {1,1,2,3,1,1} - - BOOST_CHECK( e0.empty ( )); - BOOST_CHECK( ! is_scalar(e0)); - BOOST_CHECK( ! is_vector(e0)); - BOOST_CHECK( ! is_matrix(e0)); - BOOST_CHECK( ! is_tensor(e0)); - - BOOST_CHECK( ! e1.empty ( ) ); - BOOST_CHECK( is_scalar(e1) ); - BOOST_CHECK( ! is_vector(e1) ); - BOOST_CHECK( ! is_matrix(e1) ); - BOOST_CHECK( ! is_tensor(e1) ); - - BOOST_CHECK( ! e2.empty ( ) ); - BOOST_CHECK( ! is_scalar(e2) ); - BOOST_CHECK( is_vector(e2) ); - BOOST_CHECK( ! is_matrix(e2) ); - BOOST_CHECK( ! is_tensor(e2) ); - - BOOST_CHECK( ! e3.empty ( ) ); - BOOST_CHECK( ! is_scalar(e3) ); - BOOST_CHECK( is_vector(e3) ); - BOOST_CHECK( ! is_matrix(e3) ); - BOOST_CHECK( ! is_tensor(e3) ); - - BOOST_CHECK( ! e4.empty ( ) ); - BOOST_CHECK( ! is_scalar(e4) ); - BOOST_CHECK( ! is_vector(e4) ); - BOOST_CHECK( is_matrix(e4) ); - BOOST_CHECK( ! is_tensor(e4) ); - - BOOST_CHECK( ! e5.empty ( ) ); - BOOST_CHECK( ! is_scalar(e5) ); - BOOST_CHECK( ! is_vector(e5) ); - BOOST_CHECK( is_matrix(e5) ); - BOOST_CHECK( ! 
is_tensor(e5) ); - - BOOST_CHECK( ! e6.empty ( ) ); - BOOST_CHECK( ! is_scalar(e6) ); - BOOST_CHECK( ! is_vector(e6) ); - BOOST_CHECK( ! is_matrix(e6) ); - BOOST_CHECK( is_tensor(e6) ); - - BOOST_CHECK( ! e7.empty ( ) ); - BOOST_CHECK( ! is_scalar(e7) ); - BOOST_CHECK( ! is_vector(e7) ); - BOOST_CHECK( ! is_matrix(e7) ); - BOOST_CHECK( is_tensor(e7) ); - - BOOST_CHECK( ! e8.empty ( ) ); - BOOST_CHECK( ! is_scalar(e8) ); - BOOST_CHECK( ! is_vector(e8) ); - BOOST_CHECK( ! is_matrix(e8) ); - BOOST_CHECK( is_tensor(e8) ); - - BOOST_CHECK( ! e9.empty ( ) ); - BOOST_CHECK( ! is_scalar(e9) ); - BOOST_CHECK( ! is_vector(e9) ); - BOOST_CHECK( ! is_matrix(e9) ); - BOOST_CHECK( is_tensor(e9) ); - - BOOST_CHECK( ! e10.empty( ) ); - BOOST_CHECK( ! is_scalar(e10) ); - BOOST_CHECK( ! is_vector(e10) ); - BOOST_CHECK( ! is_matrix(e10) ); - BOOST_CHECK( is_tensor(e10) ); - - BOOST_CHECK( ! e11.empty( ) ); - BOOST_CHECK( ! is_scalar(e11) ); - BOOST_CHECK( ! is_vector(e11) ); - BOOST_CHECK( ! is_matrix(e11) ); - BOOST_CHECK( is_tensor(e11) ); - - BOOST_CHECK( ! e12.empty( ) ); - BOOST_CHECK( ! is_scalar(e12) ); - BOOST_CHECK( ! is_vector(e12) ); - BOOST_CHECK( ! is_matrix(e12) ); - BOOST_CHECK( is_tensor(e12) ); - - BOOST_CHECK( ! e13.empty( ) ); - BOOST_CHECK( ! is_scalar(e13) ); - BOOST_CHECK( is_vector(e13) ); - BOOST_CHECK( ! is_matrix(e13) ); - BOOST_CHECK( ! is_tensor(e13) ); - - BOOST_CHECK( ! e14.empty( ) ); - BOOST_CHECK( is_scalar(e14) ); - BOOST_CHECK( ! is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - - BOOST_CHECK( ! e15.empty( ) ); - BOOST_CHECK( ! is_scalar(e15) ); - BOOST_CHECK( is_vector(e15) ); - BOOST_CHECK( ! is_matrix(e15) ); - BOOST_CHECK( ! is_tensor(e15) ); - - BOOST_CHECK( ! e16.empty( ) ); - BOOST_CHECK( ! is_scalar(e16) ); - BOOST_CHECK( ! is_vector(e16) ); - BOOST_CHECK( ! is_matrix(e16) ); - BOOST_CHECK( is_tensor(e16) ); - - BOOST_CHECK( ! e17.empty( ) ); - BOOST_CHECK( ! is_scalar(e17) ); - BOOST_CHECK( ! 
is_vector(e17) ); - BOOST_CHECK( ! is_matrix(e17) ); - BOOST_CHECK( is_tensor(e17) ); } -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_squeeze, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("squeeze")) +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_is, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("query")) { - auto e1 = squeeze(de1); // {1,1} - auto e2 = squeeze(de2); // {1,2} - auto e3 = squeeze(de3); // {2,1} - - auto e4 = squeeze(de4); // {2,3} - auto e5 = squeeze(de5); // {2,3} - auto e6 = squeeze(de6); // {2,3} - auto e7 = squeeze(de7); // {2,3} - auto e8 = squeeze(de8); // {2,3} - - auto e9 = squeeze(de9); // {4,2,3} - auto e10 = squeeze(de10); // {4,2,3} - auto e11 = squeeze(de11); // {4,2,3} - auto e12 = squeeze(de12); // {4,2,3} - - auto e13 = squeeze(de13); // {1,4} - auto e14 = squeeze(de14); // {1,1} - auto e15 = squeeze(de15); // {1,4} - auto e16 = squeeze(de16); // {2,1} - auto e17 = squeeze(de17); // {2,3} - - BOOST_CHECK( (e1 == extents<2>{1,1}) ); - BOOST_CHECK( (e2 == extents<2>{1,2}) ); - BOOST_CHECK( (e3 == extents<2>{2,1}) ); - - BOOST_CHECK( (e4 == extents<2>{2,3}) ); - BOOST_CHECK( (e5 == extents<2>{2,3}) ); - BOOST_CHECK( (e6 == extents<2>{2,3}) ); - BOOST_CHECK( (e7 == extents<2>{2,3}) ); - BOOST_CHECK( (e8 == extents<2>{2,3}) ); - - BOOST_CHECK( (e9 == extents<3>{4,2,3}) ); - BOOST_CHECK( (e10 == extents<3>{4,2,3}) ); - BOOST_CHECK( (e11 == extents<3>{4,2,3}) ); - BOOST_CHECK( (e12 == extents<3>{4,2,3}) ); - - BOOST_CHECK( (e13 == extents<2>{1,4}) ); - BOOST_CHECK( (e14 == extents<2>{1,1}) ); - BOOST_CHECK( (e15 == extents<2>{1,4}) ); - BOOST_CHECK( (e16 == extents<2>{2,1}) ); - BOOST_CHECK( (e17 == extents<2>{2,3}) ); - + namespace ublas = boost::numeric::ublas; + + +// auto e = de; + auto e11 = de11; + auto e12 = de12; + auto e21 = de21; + auto e23 = de23; + auto e231 = de231; + auto e123 = de123; + auto e1123 = de1123; + auto e12311 = de12311; + auto 
e423 = de423; + auto e4213 = de4213; + auto e42131 = de42131; + auto e142131 = de142131; + auto e141 = de141; + auto e1111 = de1111; + auto e14111 = de14111; + auto e112111 = de112111; + auto e112311 = de112311; + +// BOOST_CHECK( ublas::empty (e)); +// BOOST_CHECK( ! ublas::is_scalar(e)); +// BOOST_CHECK( ! ublas::is_vector(e)); +// BOOST_CHECK( ! ublas::is_matrix(e)); +// BOOST_CHECK( ! ublas::is_tensor(e)); + + BOOST_CHECK( ! ublas::empty (e11) ); + BOOST_CHECK( ublas::is_scalar(e11) ); + BOOST_CHECK( ublas::is_vector(e11) ); + BOOST_CHECK( ublas::is_matrix(e11) ); + BOOST_CHECK( ! ublas::is_tensor(e11) ); + + BOOST_CHECK( ! ublas::empty (e12) ); + BOOST_CHECK( ! ublas::is_scalar(e12) ); + BOOST_CHECK( ublas::is_vector(e12) ); + BOOST_CHECK( ublas::is_matrix(e12) ); + BOOST_CHECK( ! ublas::is_tensor(e12) ); + + BOOST_CHECK( ! ublas::empty (e21) ); + BOOST_CHECK( ! ublas::is_scalar(e21) ); + BOOST_CHECK( ublas::is_vector(e21) ); + BOOST_CHECK( ublas::is_matrix(e21) ); + BOOST_CHECK( ! ublas::is_tensor(e21) ); + + BOOST_CHECK( ! ublas::empty (e23) ); + BOOST_CHECK( ! ublas::is_scalar(e23) ); + BOOST_CHECK( ! ublas::is_vector(e23) ); + BOOST_CHECK( ublas::is_matrix(e23) ); + BOOST_CHECK( ! ublas::is_tensor(e23) ); + + BOOST_CHECK( ! ublas::empty (e231) ); + BOOST_CHECK( ! ublas::is_scalar(e231) ); + BOOST_CHECK( ! ublas::is_vector(e231) ); + BOOST_CHECK( ublas::is_matrix(e231) ); + BOOST_CHECK( ! ublas::is_tensor(e231) ); + + BOOST_CHECK( ! ublas::empty (e123) ); + BOOST_CHECK( ! ublas::is_scalar(e123) ); + BOOST_CHECK( ! ublas::is_vector(e123) ); + BOOST_CHECK( ! ublas::is_matrix(e123) ); + BOOST_CHECK( ublas::is_tensor(e123) ); + + BOOST_CHECK( ! ublas::empty (e1123) ); + BOOST_CHECK( ! ublas::is_scalar(e1123) ); + BOOST_CHECK( ! ublas::is_vector(e1123) ); + BOOST_CHECK( ! ublas::is_matrix(e1123) ); + BOOST_CHECK( ublas::is_tensor(e1123) ); + + BOOST_CHECK( ! ublas::empty (e12311) ); + BOOST_CHECK( ! ublas::is_scalar(e12311) ); + BOOST_CHECK( ! 
ublas::is_vector(e12311) ); + BOOST_CHECK( ! ublas::is_matrix(e12311) ); + BOOST_CHECK( ublas::is_tensor(e12311) ); + + BOOST_CHECK( ! ublas::empty (e423) ); + BOOST_CHECK( ! ublas::is_scalar(e423) ); + BOOST_CHECK( ! ublas::is_vector(e423) ); + BOOST_CHECK( ! ublas::is_matrix(e423) ); + BOOST_CHECK( ublas::is_tensor(e423) ); + + BOOST_CHECK( ! ublas::empty (e4213) ); + BOOST_CHECK( ! ublas::is_scalar(e4213) ); + BOOST_CHECK( ! ublas::is_vector(e4213) ); + BOOST_CHECK( ! ublas::is_matrix(e4213) ); + BOOST_CHECK( ublas::is_tensor(e4213) ); + + BOOST_CHECK( ! ublas::empty (e42131) ); + BOOST_CHECK( ! ublas::is_scalar(e42131) ); + BOOST_CHECK( ! ublas::is_vector(e42131) ); + BOOST_CHECK( ! ublas::is_matrix(e42131) ); + BOOST_CHECK( ublas::is_tensor(e42131) ); + + BOOST_CHECK( ! ublas::empty (e142131) ); + BOOST_CHECK( ! ublas::is_scalar(e142131) ); + BOOST_CHECK( ! ublas::is_vector(e142131) ); + BOOST_CHECK( ! ublas::is_matrix(e142131) ); + BOOST_CHECK( ublas::is_tensor(e142131) ); + + BOOST_CHECK( ! ublas::empty (e141) ); + BOOST_CHECK( ! ublas::is_scalar(e141) ); + BOOST_CHECK( ublas::is_vector(e141) ); + BOOST_CHECK( ublas::is_matrix(e141) ); + BOOST_CHECK( ! ublas::is_tensor(e141) ); + + BOOST_CHECK( ! ublas::empty (e1111) ); + BOOST_CHECK( ublas::is_scalar(e1111) ); + BOOST_CHECK( ublas::is_vector(e1111) ); + BOOST_CHECK( ublas::is_matrix(e1111) ); + BOOST_CHECK( ! ublas::is_tensor(e1111) ); + + BOOST_CHECK( ! ublas::empty (e14111) ); + BOOST_CHECK( ! ublas::is_scalar(e14111) ); + BOOST_CHECK( ublas::is_vector(e14111) ); + BOOST_CHECK( ublas::is_matrix(e14111) ); + BOOST_CHECK( ! ublas::is_tensor(e14111) ); + + BOOST_CHECK( ! ublas::empty (e112111) ); + BOOST_CHECK( ! ublas::is_scalar(e112111) ); + BOOST_CHECK( ! ublas::is_vector(e112111) ); + BOOST_CHECK( ! ublas::is_matrix(e112111) ); + BOOST_CHECK( ublas::is_tensor(e112111) ); + + BOOST_CHECK( ! ublas::empty (e112311) ); + BOOST_CHECK( ! ublas::is_scalar(e112311) ); + BOOST_CHECK( ! 
ublas::is_vector(e112311) ); + BOOST_CHECK( ! ublas::is_matrix(e112311) ); + BOOST_CHECK( ublas::is_tensor(e112311) ); } - -BOOST_FIXTURE_TEST_CASE(test_fixed_rank_extents_product, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("product")) +//BOOST_FIXTURE_TEST_CASE(test_extents_static_size_squeeze, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("squeeze")) +//{ +// auto e1 = squeeze(de1); // {1,1} +// auto e2 = squeeze(de2); // {1,2} +// auto 21 = squeeze(d21); // {2,1} + +// auto e4 = squeeze(de4); // {2,3} +// auto e231 = squeeze(de231); // {2,3} +// auto e123 = squeeze(de123); // {2,3} +// auto e1123 = squeeze(de1123); // {2,3} +// auto e12311 = squeeze(de12311); // {2,3} + +// auto e423 = squeeze(de423); // {4,2,3} +// auto e4213 = squeeze(de4213); // {4,2,3} +// auto e11 = squeeze(de11); // {4,2,3} +// auto e12 = squeeze(e142131); // {4,2,3} + +// auto e141 = squeeze(de141); // {1,4} +// auto e1111 = squeeze(de1111); // {1,1} +// auto e14111 = squeeze(de14111); // {1,4} +// auto e112111 = squeeze(de112111); // {2,1} +// auto e112311 = squeeze(de112311); // {2,3} + +// BOOST_CHECK( (e1 == extents<2>{1,1}) ); +// BOOST_CHECK( (e2 == extents<2>{1,2}) ); +// BOOST_CHECK( (21 == extents<2>{2,1}) ); + +// BOOST_CHECK( (e4 == extents<2>{2,3}) ); +// BOOST_CHECK( (e231 == extents<2>{2,3}) ); +// BOOST_CHECK( (e123 == extents<2>{2,3}) ); +// BOOST_CHECK( (e1123 == extents<2>{2,3}) ); +// BOOST_CHECK( (e12311 == extents<2>{2,3}) ); + +// BOOST_CHECK( (e423 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e4213 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e11 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e12 == extents<3>{4,2,3}) ); + +// BOOST_CHECK( (e141 == extents<2>{1,4}) ); +// BOOST_CHECK( (e1111 == extents<2>{1,1}) ); +// BOOST_CHECK( (e14111 == extents<2>{1,4}) ); +// BOOST_CHECK( (e112111 == extents<2>{2,1}) ); +// BOOST_CHECK( (e112311 == extents<2>{2,3}) ); + +//} + + 
+BOOST_FIXTURE_TEST_CASE(test_extents_static_size_product, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("product")) { - - auto e0 = product( de0 ); // {} - auto e1 = product( de1 ); // {1,1} - auto e2 = product( de2 ); // {1,2} - auto e3 = product( de3 ); // {2,1} - auto e4 = product( de4 ); // {2,3} - auto e5 = product( de5 ); // {2,3,1} - auto e6 = product( de6 ); // {1,2,3} - auto e7 = product( de7 ); // {1,1,2,3} - auto e8 = product( de8 ); // {1,2,3,1,1} - auto e9 = product( de9 ); // {4,2,3} - auto e10 = product( de10 ); // {4,2,1,3} - auto e11 = product( de11 ); // {4,2,1,3,1} - auto e12 = product( de12 ); // {1,4,2,1,3,1} - auto e13 = product( de13 ); // {1,4,1} - auto e14 = product( de14 ); // {1,1,1,1} - auto e15 = product( de15 ); // {1,4,1,1,1} - auto e16 = product( de16 ); // {1,1,2,1,1,1} - auto e17 = product( de17 ); // {1,1,2,3,1,1} - - BOOST_CHECK_EQUAL( e0 , 0 ); - BOOST_CHECK_EQUAL( e1 , 1 ); - BOOST_CHECK_EQUAL( e2 , 2 ); - BOOST_CHECK_EQUAL( e3 , 2 ); - BOOST_CHECK_EQUAL( e4 , 6 ); - BOOST_CHECK_EQUAL( e5 , 6 ); - BOOST_CHECK_EQUAL( e6 , 6 ); - BOOST_CHECK_EQUAL( e7 , 6 ); - BOOST_CHECK_EQUAL( e8 , 6 ); - BOOST_CHECK_EQUAL( e9 , 24 ); - BOOST_CHECK_EQUAL( e10, 24 ); - BOOST_CHECK_EQUAL( e11, 24 ); - BOOST_CHECK_EQUAL( e12, 24 ); - BOOST_CHECK_EQUAL( e13, 4 ); - BOOST_CHECK_EQUAL( e14, 1 ); - BOOST_CHECK_EQUAL( e15, 4 ); - BOOST_CHECK_EQUAL( e16, 2 ); - BOOST_CHECK_EQUAL( e17, 6 ); + namespace ublas = boost::numeric::ublas; + +// auto e = ublas::product( de ); + auto e11 = ublas::product( de11 ); + auto e12 = ublas::product( de12 ); + auto e21 = ublas::product( de21 ); + auto e23 = ublas::product( de23 ); + auto e231 = ublas::product( de231 ); + auto e123 = ublas::product( de123 ); + auto e1123 = ublas::product( de1123 ); + auto e12311 = ublas::product( de12311 ); + auto e423 = ublas::product( de423 ); + auto e4213 = ublas::product( de4213 ); + auto e42131 = ublas::product( de42131 ); + auto e142131 = 
ublas::product( de142131 ); + auto e141 = ublas::product( de141 ); + auto e1111 = ublas::product( de1111 ); + auto e14111 = ublas::product( de14111 ); + auto e112111 = ublas::product( de112111 ); + auto e112311 = ublas::product( de112311 ); + +// BOOST_CHECK_EQUAL( e , 0 ); + BOOST_CHECK_EQUAL( e11 , 1 ); + BOOST_CHECK_EQUAL( e12 , 2 ); + BOOST_CHECK_EQUAL( e21 , 2 ); + BOOST_CHECK_EQUAL( e23 , 6 ); + BOOST_CHECK_EQUAL( e231 , 6 ); + BOOST_CHECK_EQUAL( e123 , 6 ); + BOOST_CHECK_EQUAL( e1123 , 6 ); + BOOST_CHECK_EQUAL( e12311 , 6 ); + BOOST_CHECK_EQUAL( e423 , 24 ); + BOOST_CHECK_EQUAL( e4213 , 24 ); + BOOST_CHECK_EQUAL( e42131 , 24 ); + BOOST_CHECK_EQUAL( e142131, 24 ); + BOOST_CHECK_EQUAL( e141 , 4 ); + BOOST_CHECK_EQUAL( e1111 , 1 ); + BOOST_CHECK_EQUAL( e14111 , 4 ); + BOOST_CHECK_EQUAL( e112111, 2 ); + BOOST_CHECK_EQUAL( e112311, 6 ); } diff --git a/test/tensor/test_fixed_rank_functions.cpp b/test/tensor/test_fixed_rank_functions.cpp index 8ccea3f76..df3d4ce80 100644 --- a/test/tensor/test_fixed_rank_functions.cpp +++ b/test/tensor/test_fixed_rank_functions.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -23,7 +23,7 @@ #include "utility.hpp" // BOOST_AUTO_TEST_SUITE ( test_tensor_functions, * boost::unit_test::depends_on("test_tensor_contraction") ) -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_tensor_functions) +BOOST_AUTO_TEST_SUITE ( test_tensor_extents_static_size_functions) using test_types = zip>::with_t; @@ -33,372 +33,427 @@ using test_types = zip>::with_t - using fixed_rank_extents_type = boost::numeric::ublas::extents; - - using dynamic_extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents { - dynamic_extents_type{1,1}, // 1 - dynamic_extents_type{2,3}, // 2 - dynamic_extents_type{2,3,1}, // 3 - dynamic_extents_type{4,2,3}, // 4 - dynamic_extents_type{4,2,3,5}} // 5 - { - } - - std::tuple< - fixed_rank_extents_type<2>, - fixed_rank_extents_type<2>, - fixed_rank_extents_type<3>, - fixed_rank_extents_type<3>, - fixed_rank_extents_type<4> - > fixed_rank_extents{ - {1,1}, // 1 - {2,3}, // 2 - {2,3,1}, // 3 - {4,2,3}, // 4 - {4,2,3,5} // 5 + std::tuple< + boost::numeric::ublas::extents<2>, + boost::numeric::ublas::extents<2>, + boost::numeric::ublas::extents<3>, + boost::numeric::ublas::extents<3>, + boost::numeric::ublas::extents<4> + > extents_tuple{ + {1,1}, // 1 + {2,3}, // 2 + {2,3,1}, // 3 + {4,2,3}, // 4 + {4,2,3,5} // 5 }; - std::vector extents; + std::vector> extents_vector = + { + {1,1}, // 1 + {2,3}, // 2 + {2,3,1}, // 3 + {4,2,3}, // 4 + {4,2,3,5} // 5 + }; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_vector, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_vector, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(fixed_rank_extents,[](auto const&, auto & n){ - using extents_type = typename std::decay::type; - using tensor_type = ublas::fixed_rank_tensor; - using vector_type = 
typename tensor_type::vector_type; - auto a = tensor_type(n, value_type{2}); - - for (auto m = 0u; m < n.size(); ++m) { - auto b = vector_type(n[m], value_type{1}); - - auto c = ublas::prod(a, b, m + 1); - - for (auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], value_type( static_cast< inner_type_t >(n[m]) ) * a[i]); - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents_tuple,[](auto const& /*unused*/, auto const& n){ + + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using vector_t = typename tensor_t::vector_type; + auto a = tensor_t(n); + a = 2; + + for (auto m = 0u; m < ublas::size(n); ++m) { + auto b = vector_t(n[m], value_t{1}); + + auto c = ublas::prod(a, b, m + 1); + + for (auto i = 0u; i < c.size(); ++i) + BOOST_CHECK_EQUAL(c[i], value_t( static_cast< inner_type_t >(n[m]) ) * a[i]); + } + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_matrix, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_matrix, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(fixed_rank_extents,[](auto const&, auto & n){ - using extents_type = typename std::decay::type; - using tensor_type = ublas::fixed_rank_tensor; - using matrix_type = typename tensor_type::matrix_type; - auto a = tensor_type(n, value_type{2}); - for (auto m = 0u; m < n.size(); ++m) { - - auto b = matrix_type ( n[m], n[m], value_type{1} ); - - auto c = ublas::prod(a, b, m + 1); - - for (auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], value_type( static_cast< inner_type_t >(n[m]) ) * a[i]); - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename 
value::second_type; + + for_each_in_tuple(extents_tuple,[](auto const& /*unused*/, auto const & n){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using matrix_t = typename tensor_t::matrix_type; + + auto a = tensor_t(n); + a = 2; + for (auto m = 0u; m < ublas::size(n); ++m) { + + auto b = matrix_t ( n[m], n[m], value_t{1} ); + auto c = ublas::prod(a, b, m + 1); + + for (auto i = 0u; i < c.size(); ++i){ + BOOST_CHECK_EQUAL(c[i], value_t( static_cast< inner_type_t >(n[m]) ) * a[i]); + } + } + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_tensor_1, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_tensor_1, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto const body = [](auto const& a, auto const& b){ - auto const pa = a.rank(); - - for (auto q = 0ul; q <= pa; ++q) { - - auto phi = std::vector(q); - - std::iota(phi.begin(), phi.end(), 1ul); - - auto c = ublas::prod(a, b, phi); - - auto acc = value_type(1); - for (auto i = 0ul; i < q; ++i) - acc *= value_type( static_cast< inner_type_t >( a.extents().at(phi.at(i) - 1) ) ); - - for (auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); - } - }; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + auto check = [&](auto const& a, auto const& b, std::index_sequence /*unused*/) + { + namespace ublas = boost::numeric::ublas; - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - auto n1 = n; - auto n2 = n; - using extents_type_1 = typename std::decay::type; - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = - ublas::fixed_rank_tensor; - using tensor_type_2 = - ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type{2}); - auto b = 
tensor_type_2(n2, value_type{3}); - body(a,b); - }); + constexpr auto q = sizeof...(qs); - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - auto n1 = n; - auto n2 = extents[I]; - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = - ublas::fixed_rank_tensor; - using tensor_type_2 = - ublas::dynamic_tensor; - auto a = tensor_type_1(n1, value_type{2}); - auto b = tensor_type_2(n2, value_type{3}); - body(a,b); - }); + using tensorA = std::decay_t; + using tensorB = std::decay_t; - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - auto n1 = extents[I]; - auto n2 = n; - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = - ublas::dynamic_tensor; - using tensor_type_2 = - ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type{2}); - auto b = tensor_type_2(n2, value_type{3}); - body(a,b); - }); + using extentsA = typename tensorA::extents_type; + using extentsB = typename tensorB::extents_type; -} + static_assert(!ublas::is_static_v && !ublas::is_static_v ); -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_prod_tensor_2, value, test_types, fixture ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + constexpr auto one_of_extents_is_resizable = ublas::is_dynamic_rank_v || + ublas::is_dynamic_rank_v; + using phi_type = std::conditional_t, + std::array >; + auto phi = phi_type{}; + if constexpr(std::is_same_v>){ + phi.resize(q); + } + std::iota(phi.begin(), phi.end(), std::size_t{1}); + auto c = ublas::prod(a, b, phi); - auto compute_factorial = [](auto const& p){ - auto f = 1ul; - for(auto i = 1u; i <= p; ++i) - f *= i; - return f; - }; + auto const& na = a.extents(); + auto acc = std::size_t{1}; + for (auto i = 0ul; i < q; ++i){ + acc *= na.at(phi.at(i)-1); + } + const auto v = value_t(acc) * a[0] * b[0]; + BOOST_CHECK( std::all_of(c.begin(),c.end(),[v](auto cc){ return cc == v;})); + }; + + 
+ for_each_in_tuple(extents_tuple,[&](auto const& /*I*/, auto const& n){ + constexpr auto size = std::tuple_size_v>; + constexpr auto modes = std::make_index_sequence{}; + using tensor_t = ublas::tensor_static_rank; + auto a = tensor_t(n); + auto b = tensor_t(n); + a = 2; + b = 3; + for_each_in_index(modes, a,b, check ); + }); + + for_each_in_tuple(extents_tuple,[&](auto const& I, auto const& n){ + auto const& nA = n; + auto const& nB = extents_vector[I]; + constexpr auto sizeA = std::tuple_size_v>; + constexpr auto modes = std::make_index_sequence{}; + using tensorA_type = ublas::tensor_static_rank; + using tensorB_type = ublas::tensor_dynamic; + auto a = tensorA_type(nA); + auto b = tensorB_type(nB); + a = 2; + b = 3; + + for_each_in_index(modes, a,b, check ); + }); + + for_each_in_tuple(extents_tuple,[&](auto const& I, auto const& n){ + auto const& nA = extents_vector[I]; + auto const& nB = n; + constexpr auto sizeB = std::tuple_size_v>; + constexpr auto modes = std::make_index_sequence{}; + using tensor_t_1 = ublas::tensor_dynamic; + using tensor_t_2 = ublas::tensor_static_rank; + auto a = tensor_t_1(nA); + auto b = tensor_t_2(nB); + a = 2; + b = 3; + for_each_in_index(modes, a,b, check ); + + }); +} - auto permute_extents_s_1 = [](auto const& pi, auto const& na){ +// TODO: +#if 0 - auto nb = ublas::extents<>(na); - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - nb[pi[j]-1] = na[j]; - return nb; - }; - auto permute_extents_s_2 = [](auto const& pi, auto const& na){ - auto tempn = na.base(); - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - tempn[pi[j]-1] = na[j]; - return ublas::extents::type::_size>(tempn.begin(),tempn.end()); - }; +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_prod_tensor_2, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + constexpr auto to_array = 
[](std::index_sequence/*unused*/) { + return std::array{is...}; + }; + + auto compute_factorial = [](std::index_sequence/*unused*/) { + return ( 1 * ... * is ); + }; + /* + auto compute_factorial = [](auto const& p){ + auto f = 1ul; + for(auto i = 1u; i <= p; ++i) + f *= i; + return f; + }; +*/ + auto permute_extents_dynamic_rank = [](auto const& pi, auto const& na){ + auto nb = ublas::extents<>(na.begin(),na.end()); + assert(std::size(pi) == ublas::size(na)); + for(auto j = 0u; j < std::size(pi); ++j) + nb[pi[j]-1] = na[j]; + return nb; + }; + + auto permute_extents_static_rank = [](std::array const& pi, auto const& na){ + //constexpr auto size = std::tuple_size_v>; + auto na_base = na.base(); + assert(std::size(pi) == size); + for(auto j = 0u; j < std::size(pi); ++j) + na_base[pi[j]-1] = na[j]; + return ublas::extents(na_base.begin(),na_base.end()); + }; + + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto const& n){ + auto const& na = n; + constexpr auto size = std::tuple_size_v>; + using tensorA_type = ublas::tensor_static_rank; + auto a = tensorA_type(na); + a = 2; + assert(a.rank() == size); + // auto const pa = a.rank(); + auto pi = to_array(std::make_index_sequence{}); + constexpr auto factorial = compute_factorial(std::make_index_sequence{}); + // auto pi = std::vector(pa); + // auto fac = compute_factorial(pa); + // std::iota(pi.begin(), pi.end(), 1); + + constexpr auto factorials = std::make_index_sequence{}; + + // for_each_in_tuple(factorials,[&](auto const& /*unused*/, auto const& /*unused*/){ + // using tensorB_type = ublas::tensor_dynamic; + // const auto nb = permute_extents_dynamic_rank(pi, na); + // const auto b = tensorB_type(nb, value_t{3}); + + // constexpr auto modes = std::make_index_sequence{}; + + // for_each_in_tuple(modes,[&](auto const& /*unused*/, auto const& /*unused*/){ + + + // const auto phia = to_array(std::make_index_sequence); + // const auto phib = std::array(q); + + // }); + + // for (auto f = 0ul; f < fac; 
++f) { + // for (auto q = 0ul; q <= pa; ++q) { + + // auto phia = std::vector(q); + // auto phib = std::vector(q); + + // std::iota(phia.begin(), phia.end(), 1ul); + // std::transform(phia.begin(), phia.end(), phib.begin(), + // [&pi](std::size_t i) { return pi.at(i - 1); }); + + // auto c = ublas::prod(a, b, phia, phib); + + // auto acc = value_t(1); + // for (auto i = 0ul; i < q; ++i) + // acc *= value_t( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); + + // for (auto i = 0ul; i < c.size(); ++i) + // BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); + // } + + // std::next_permutation(pi.begin(), pi.end()); + // } + }); + + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto & /*n*/){ + // auto const& na = n; + // constexpr auto size = std::tuple_size_v>; + // using tensor_t_1 = ublas::tensor_static_rank; + // auto a = tensor_t_1(na, value_t{2}); + // auto const pa = a.rank(); + + // auto pi = std::vector(pa); + // auto fac = compute_factorial(pa); + // std::iota(pi.begin(), pi.end(), 1); + + // for (auto f = 0ul; f < fac; ++f) { + // auto nb = permute_extents_static_rank(pi, na); + + // using tensor_t_2 = ublas::tensor_static_rank; + // auto b = tensor_t_2(nb, value_t{3}); + + // for (auto q = 0ul; q <= pa; ++q) { + + // auto phia = std::vector(q); + // auto phib = std::vector(q); + + // std::iota(phia.begin(), phia.end(), 1ul); + // std::transform(phia.begin(), phia.end(), phib.begin(), + // [&pi](std::size_t i) { return pi.at(i - 1); }); + + // auto c = ublas::prod(a, b, phia, phib); - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - auto na = n; - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(na, value_type{2}); - auto const pa = a.rank(); - - auto pi = std::vector(pa); - auto fac = compute_factorial(pa); - std::iota(pi.begin(), pi.end(), 1); - - for (auto f = 0ul; f < fac; ++f) { - auto nb = permute_extents_s_1(pi, na); - using tensor_type_2 
= ublas::dynamic_tensor; - auto b = tensor_type_2(nb, value_type{3}); - - for (auto q = 0ul; q <= pa; ++q) { - - auto phia = std::vector(q); - auto phib = std::vector(q); - - std::iota(phia.begin(), phia.end(), 1ul); - std::transform(phia.begin(), phia.end(), phib.begin(), - [&pi](std::size_t i) { return pi.at(i - 1); }); - - auto c = ublas::prod(a, b, phia, phib); - - auto acc = value_type(1); - for (auto i = 0ul; i < q; ++i) - acc *= value_type( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); - - for (auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); - } - - std::next_permutation(pi.begin(), pi.end()); - } - }); + // auto acc = value_t(1); + // for (auto i = 0ul; i < q; ++i){ + // acc *= value_t( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); + // } - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - auto na = n; - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(na, value_type{2}); - auto const pa = a.rank(); - - auto pi = std::vector(pa); - auto fac = compute_factorial(pa); - std::iota(pi.begin(), pi.end(), 1); - - for (auto f = 0ul; f < fac; ++f) { - auto nb = permute_extents_s_2(pi, na); - - using extents_type_2 = typename std::decay::type; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto b = tensor_type_2(nb, value_type{3}); - - for (auto q = 0ul; q <= pa; ++q) { - - auto phia = std::vector(q); - auto phib = std::vector(q); - - std::iota(phia.begin(), phia.end(), 1ul); - std::transform(phia.begin(), phia.end(), phib.begin(), - [&pi](std::size_t i) { return pi.at(i - 1); }); - - auto c = ublas::prod(a, b, phia, phib); - - auto acc = value_type(1); - for (auto i = 0ul; i < q; ++i) - acc *= value_type( static_cast< inner_type_t >( a.extents().at(phia.at(i) - 1) ) ); - - for (auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); - } - - std::next_permutation(pi.begin(), pi.end()); 
- } - }); + // for (auto i = 0ul; i < c.size(); ++i) + // BOOST_CHECK_EQUAL(c[i], acc *a[0] * b[0]); + // } -} + // std::next_permutation(pi.begin(), pi.end()); + // } + }); +} +#endif -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_inner_prod, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_inner_prod, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto const body = [&](auto const& a, auto const& b){ - auto c = ublas::inner_prod(a, b); - auto r = std::inner_product(a.begin(),a.end(), b.begin(),value_type(0)); - BOOST_CHECK_EQUAL( c , r ); - }; + using dtensor_t = ublas::tensor_dynamic; - for_each_tuple(fixed_rank_extents,[&](auto const&, auto & n){ - using extents_type_1 = typename std::decay::type; - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(n, value_type(2)); - auto b = tensor_type_2(n, value_type(1)); - body(a,b); + auto const body = [&](auto const& a, auto const& b){ + auto c = ublas::inner_prod(a, b); + auto r = std::inner_product(a.begin(),a.end(), b.begin(),value_t(0)); + BOOST_CHECK_EQUAL( c , r ); + }; - }); + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto & n){ + constexpr auto size = std::tuple_size_v>; + using stensor_t = ublas::tensor_static_rank; + auto a = stensor_t(n); + auto b = stensor_t(n); + a = 2; + b = 3; + body(a,b); - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - using tensor_type_2 = ublas::dynamic_tensor; - auto a = tensor_type_1(n, value_type(2)); - auto b = 
tensor_type_2(extents[I], value_type(1)); - body(a,b); + }); - }); + for_each_in_tuple(extents_tuple,[&](auto const& I, auto & n){ + constexpr auto size = std::tuple_size_v>; + using stensor_t = ublas::tensor_static_rank; + auto a = stensor_t(n); + auto b = dtensor_t(extents_vector[I]); + a = 2; + b = 1; - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto & n){ - using extents_type_2 = typename std::decay::type; - using tensor_type_1 = ublas::dynamic_tensor; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(extents[I], value_type(2)); - auto b = tensor_type_2(n, value_type(1)); - body(a,b); + body(a,b); - }); + }); + + for_each_in_tuple(extents_tuple,[&](auto const& I, auto & n){ + constexpr auto size = std::tuple_size_v>; + using stensor_t = ublas::tensor_static_rank; + auto a = dtensor_t(extents_vector[I]); + auto b = stensor_t(n); + a = 2; + b = 1; + body(a,b); + + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_fixed_rank_tensor_outer_prod, value, test_types, fixture ) + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_extents_static_size_outer_prod, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(fixed_rank_extents,[&](auto const&, auto const& n1){ - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type(2)); - for_each_tuple(fixed_rank_extents,[&](auto const& /*J*/, auto const& n2){ - using extents_type_2 = typename std::decay::type; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto b = tensor_type_2(n2, value_type(1)); - auto c = ublas::outer_prod(a, b); - - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents_tuple,[&](auto 
const& /*unused*/, auto const& n1){ + constexpr auto size1 = std::tuple_size_v>; + using tensor_t_1 = ublas::tensor_static_rank; + auto a = tensor_t_1(n1); + a = 2; + for_each_in_tuple(extents_tuple,[&](auto const& /*J*/, auto const& n2){ + constexpr auto size2 = std::tuple_size_v>; + using tensor_t_2 = ublas::tensor_static_rank; + auto b = tensor_t_2(n2); + b = 1; + auto c = ublas::outer_prod(a, b); + + BOOST_CHECK ( std::all_of(c.begin(),c.end(), [&a,&b](auto cc){return cc == a[0]*b[0];}) ); }); - for_each_tuple(fixed_rank_extents,[&](auto const& I, auto const& /*n1*/){ - using tensor_type_1 = ublas::dynamic_tensor; - auto a = tensor_type_1(extents[I], value_type(2)); - for_each_tuple(fixed_rank_extents,[&](auto const& /*J*/, auto const& n2){ - using extents_type_2 = typename std::decay::type; - using tensor_type_2 = ublas::fixed_rank_tensor; - auto b = tensor_type_2(n2, value_type(1)); - auto c = ublas::outer_prod(a, b); + }); - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - }); + for_each_in_tuple(extents_tuple,[&](auto const& I, auto const& /*n1*/){ + using tensor_t_1 = ublas::tensor_dynamic; + auto a = tensor_t_1(extents_vector[I]); + a = 2; + for_each_in_tuple(extents_tuple,[&](auto const& /*J*/, auto const& n2){ + constexpr auto size = std::tuple_size_v>; + using tensor_t_2 = ublas::tensor_static_rank; + auto b = tensor_t_2(n2); + b = 1; + auto c = ublas::outer_prod(a, b); + BOOST_CHECK ( std::all_of(c.begin(),c.end(), [&a,&b](auto cc){return cc == a[0]*b[0];}) ); + +// for(auto const& cc : c) +// BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); }); - for_each_tuple(fixed_rank_extents,[&](auto const&, auto const& n1){ - using extents_type_1 = typename std::decay::type; - using tensor_type_1 = ublas::fixed_rank_tensor; - auto a = tensor_type_1(n1, value_type(2)); - for(auto n2 : extents){ - using tensor_type_2 = ublas::dynamic_tensor; - auto b = tensor_type_2(n2, value_type(1)); - auto c = ublas::outer_prod(a, b); + }); - for(auto const& cc : c) - 
BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - } + for_each_in_tuple(extents_tuple,[&](auto const& /*unused*/, auto const& n1){ + constexpr auto size = std::tuple_size_v>; + using tensor_t_1 = ublas::tensor_static_rank; + auto a = tensor_t_1(n1); + a = 2; + for(auto const& n2 : extents_vector){ + using tensor_t_2 = ublas::tensor_dynamic; + auto b = tensor_t_2(n2); + b = 1; + auto c = ublas::outer_prod(a, b); - }); + BOOST_CHECK ( std::all_of(c.begin(),c.end(), [&a,&b](auto cc){return cc == a[0]*b[0];}) ); + +// for(auto const& cc : c) +// BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); + } + + }); } diff --git a/test/tensor/test_fixed_rank_operators_arithmetic.cpp b/test/tensor/test_fixed_rank_operators_arithmetic.cpp index e5bbcf86a..08ef6b8f9 100644 --- a/test/tensor/test_fixed_rank_operators_arithmetic.cpp +++ b/test/tensor/test_fixed_rank_operators_arithmetic.cpp @@ -18,7 +18,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_fixed_rank_tensor_arithmetic_operations) +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_arithmetic_operations) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -26,136 +26,131 @@ using test_types = zip::with_t - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<3>, // 3 - extents_type<3>, // 4 - extents_type<4> // 5 - > extents = { - extents_type<2>{1,1}, - extents_type<2>{2,3}, - extents_type<3>{4,1,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 + > extents = { + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = 
typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto r = tensor_type (e); - auto v = value_type {}; + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - r = t + t + t + t2; + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + r = t + t + t + t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3*t(i) + t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3*t(i) + t2(i) ); - r = t2 / (t+3) * (t+1) - t2; // r = ( t2/ ((t+3)*(t+1)) ) - t2 + r = t2 / (t+3) * (t+1) - t2; // r = ( t2/ ((t+3)*(t+1)) ) - t2 - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t2(i) / (t(i)+3)*(t(i)+1) - t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t2(i) / (t(i)+3)*(t(i)+1) - t2(i) ); - r = 3+t2 / (t+3) * (t+1) * t - t2; // r = 3+( t2/ ((t+3)*(t+1)*t) ) - t2 + r = 3+t2 / (t+3) * (t+1) * t - t2; // r = 3+( t2/ ((t+3)*(t+1)*t) ) - t2 - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (t(i)+3)*(t(i)+1)*t(i) - t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (t(i)+3)*(t(i)+1)*t(i) - t2(i) ); - r = t2 - t + t2 - t; + r = t2 - t + t2 - t; - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 4 ); + for(auto i = 0ul; i < r.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 4 ); - r = tensor_type (e,1) + 
tensor_type (e,1); + r = t * t * t * t2; - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t(i)*t(i)*t(i)*t2(i) ); - r = t * t * t * t2; + r = (t2/t2) * (t2/t2); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t(i)*t(i)*t(i)*t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 1 ); + }; - r = (t2/t2) * (t2/t2); - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 1 ); - }; - - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); - tensor_type r1 = t + 2 + t + 2; + tensor_t r1 = t + 2 + t + 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r1(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r1(i), 2*t(i) + 4 ); - tensor_type r2 = 2 + t + 2 + t; + tensor_t r2 = 2 + t + 2 + t; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r2(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r2(i), 
2*t(i) + 4 ); - tensor_type r3 = (t-2) + (t-2); + tensor_t r3 = (t-2) + (t-2); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r3(i), 2*t(i) - 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r3(i), 2*t(i) - 4 ); - tensor_type r4 = (t*2) * (3*t); + tensor_t r4 = (t*2) * (3*t); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r4(i), 2*3*t(i)*t(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r4(i), 2*3*t(i)*t(i) ); - tensor_type r5 = (t2*2) / (2*t2) * t2; + tensor_t r5 = (t2*2) / (2*t2) * t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); - tensor_type r6 = (t2/2+1) / (2/t2+1) / t2; + tensor_t r6 = (t2/2+1) / (2/t2+1) / t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); - }; + }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } @@ -164,79 +159,79 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto r = tensor_type (e); - auto v = value_type {}; + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = 
ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); - r = t + 2; - r += t; - r += 2; + r = t + 2; + r += t; + r += 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); - r = 2 + t; - r += t; - r += 2; + r = 2 + t; + r += t; + r += 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); - r = (t-2); - r += t; - r -= 2; + r = (t-2); + r += t; + r -= 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) - 4 ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) - 4 ); - r = (t*2); - r *= 3; - r *= t; + r = (t*2); + r *= 3; + r *= t; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*3*t(i)*t(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*3*t(i)*t(i) ); - r = (t2*2); - r /= 2; - r /= t2; - r *= t2; + r = (t2*2); + r /= 2; + r /= t2; + r *= t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); - r = (t2/2+1); - r /= (2/t2+1); - r /= t2; + r = (t2/2+1); + r /= (2/t2+1); + r /= t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); - tensor_type q = -r; - for(auto i = 0ul; i < 
t.size(); ++i) - BOOST_CHECK_EQUAL ( q(i), -r(i) ); + tensor_t q = -r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( q(i), -r(i) ); - tensor_type p = +r; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( p(i), r(i) ); - }; + tensor_t p = +r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( p(i), r(i) ); + }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } diff --git a/test/tensor/test_fixed_rank_operators_comparison.cpp b/test/tensor/test_fixed_rank_operators_comparison.cpp index a4c60da0c..59338c387 100644 --- a/test/tensor/test_fixed_rank_operators_comparison.cpp +++ b/test/tensor/test_fixed_rank_operators_comparison.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -17,187 +17,181 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_fixed_rank_tensor_comparison) +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_comparison) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { - template - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<3>, // 3 - extents_type<3>, // 4 - extents_type<4> // 5 + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 > extents = { - extents_type<2>{1,1}, - extents_type<2>{2,3}, - extents_type<3>{4,1,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; - - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); - - if(t.empty()) - return; - - BOOST_CHECK(!(t < t)); - BOOST_CHECK(!(t > t)); - BOOST_CHECK( t < t2 ); - BOOST_CHECK( t2 > t ); - BOOST_CHECK( t <= t ); - BOOST_CHECK( t >= t ); - BOOST_CHECK( t <= t2 ); - BOOST_CHECK( t2 >= t ); - BOOST_CHECK( t2 >= t2 ); - BOOST_CHECK( t2 >= t ); - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t 
= typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) + { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK(!(t < t)); + BOOST_CHECK(!(t > t)); + BOOST_CHECK( t < t2 ); + BOOST_CHECK( t2 > t ); + BOOST_CHECK( t <= t ); + BOOST_CHECK( t >= t ); + BOOST_CHECK( t <= t2 ); + BOOST_CHECK( t2 >= t ); + BOOST_CHECK( t2 >= t2 ); + BOOST_CHECK( t2 >= t ); + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); - if(t.empty()) - return; + if(t.empty()) + return; - BOOST_CHECK( !(t < t) ); - BOOST_CHECK( !(t > t) ); - BOOST_CHECK( t < (t2+t) ); - BOOST_CHECK( (t2+t) > t ); - BOOST_CHECK( t <= (t+t) ); - 
BOOST_CHECK( (t+t2) >= t ); - BOOST_CHECK( (t2+t2+2) >= t); - BOOST_CHECK( 2*t2 > t ); - BOOST_CHECK( t < 2*t2 ); - BOOST_CHECK( 2*t2 > t); - BOOST_CHECK( 2*t2 >= t2 ); - BOOST_CHECK( t2 <= 2*t2); - BOOST_CHECK( 3*t2 >= t ); + BOOST_CHECK( !(t < t) ); + BOOST_CHECK( !(t > t) ); + BOOST_CHECK( t < (t2+t) ); + BOOST_CHECK( (t2+t) > t ); + BOOST_CHECK( t <= (t+t) ); + BOOST_CHECK( (t+t2) >= t ); + BOOST_CHECK( (t2+t2+2) >= t); + BOOST_CHECK( 2*t2 > t ); + BOOST_CHECK( t < 2*t2 ); + BOOST_CHECK( 2*t2 > t); + BOOST_CHECK( 2*t2 >= t2 ); + BOOST_CHECK( t2 <= 2*t2); + BOOST_CHECK( 3*t2 >= t ); + }); - }; - - for_each_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - +//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) +//{ +// namespace ublas = boost::numeric::ublas; +// using value_t = typename value::first_type; +// using layout_t = typename value::second_type; - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - BOOST_CHECK( tensor_type(e,value_type{2}) == tensor_type(e,value_type{2}) ); - BOOST_CHECK( tensor_type(e,value_type{2}) != tensor_type(e,value_type{1}) ); +// for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { +// using extents_t = std::decay_t; +// using tensor_t = ublas::tensor_static_rank, layout_t>; - if(e.empty()) - return; +// BOOST_CHECK( tensor_t(e,value_t{2}) == tensor_t(e,value_t{2}) ); +// BOOST_CHECK( tensor_t(e,value_t{2}) != tensor_t(e,value_t{1}) ); - BOOST_CHECK( !(tensor_type(e,2) < 2) ); - BOOST_CHECK( !(tensor_type(e,2) > 2) ); - BOOST_CHECK( (tensor_type(e,2) >= 2) ); - BOOST_CHECK( (tensor_type(e,2) <= 2) ); - BOOST_CHECK( (tensor_type(e,2) == 2) ); - BOOST_CHECK( (tensor_type(e,2) != 3) ); +// 
if(ublas::empty(e)) +// return; - BOOST_CHECK( !(2 > tensor_type(e,2)) ); - BOOST_CHECK( !(2 < tensor_type(e,2)) ); - BOOST_CHECK( (2 <= tensor_type(e,2)) ); - BOOST_CHECK( (2 >= tensor_type(e,2)) ); - BOOST_CHECK( (2 == tensor_type(e,2)) ); - BOOST_CHECK( (3 != tensor_type(e,2)) ); +// BOOST_CHECK( !(tensor_t(e,2) < 2) ); +// BOOST_CHECK( !(tensor_t(e,2) > 2) ); +// BOOST_CHECK( (tensor_t(e,2) >= 2) ); +// BOOST_CHECK( (tensor_t(e,2) <= 2) ); +// BOOST_CHECK( (tensor_t(e,2) == 2) ); +// BOOST_CHECK( (tensor_t(e,2) != 3) ); - BOOST_CHECK( !( tensor_type(e,2)+3 < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+3 > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 != 6) ); +// BOOST_CHECK( !(2 > tensor_t(e,2)) ); +// BOOST_CHECK( !(2 < tensor_t(e,2)) ); +// BOOST_CHECK( (2 <= tensor_t(e,2)) ); +// BOOST_CHECK( (2 >= tensor_t(e,2)) ); +// BOOST_CHECK( (2 == tensor_t(e,2)) ); +// BOOST_CHECK( (3 != tensor_t(e,2)) ); +// BOOST_CHECK( !( tensor_t(e,2)+3 < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+3 > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 != 6) ); - BOOST_CHECK( !( 5 > tensor_type(e,2)+3) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+3) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+3) ); +// BOOST_CHECK( !( 5 > tensor_t(e,2)+3) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+3) ); - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) > 5) ); - BOOST_CHECK( ( 
tensor_type(e,2)+tensor_type(e,3) >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) != 6) ); +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) != 6) ); - BOOST_CHECK( !( 5 > tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+tensor_type(e,3)) ); - }; +// BOOST_CHECK( !( 5 > tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+tensor_t(e,3)) ); -for_each_tuple(extents,check); +// }); -} +//} BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_fixed_rank_strides.cpp b/test/tensor/test_fixed_rank_strides.cpp index c90cba953..c7efd6413 100644 --- a/test/tensor/test_fixed_rank_strides.cpp +++ b/test/tensor/test_fixed_rank_strides.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,152 +10,175 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // - +#if 0 #include #include -#include - -//BOOST_AUTO_TEST_SUITE(test_strides, * boost::unit_test::depends_on("test_extents")); +#include BOOST_AUTO_TEST_SUITE(test_fixed_rank_strides) using test_types = std::tuple; +template +using strides =boost::numeric::ublas::strides,L>; + BOOST_AUTO_TEST_CASE_TEMPLATE( test_fixed_rank_strides_ctor, value, test_types) { - namespace ub = boost::numeric::ublas; - - ub::basic_fixed_rank_strides s0{ub::extents<0>{}}; - BOOST_CHECK ( s0.empty()); - BOOST_CHECK_EQUAL ( s0.size(), 0); - - ub::basic_fixed_rank_strides s1{ub::extents<2>{1,1}}; - BOOST_CHECK (!s1.empty()); - BOOST_CHECK_EQUAL ( s1.size(), 2); - - ub::basic_fixed_rank_strides s2{ub::extents<2>{1,2}}; - BOOST_CHECK (!s2.empty()); - BOOST_CHECK_EQUAL ( s2.size(), 2); - - ub::basic_fixed_rank_strides s3{ub::extents<2>{2,1}}; - BOOST_CHECK (!s3.empty()); - BOOST_CHECK_EQUAL ( s3.size(), 2); - - ub::basic_fixed_rank_strides s4{ub::extents<2>{2,3}}; - BOOST_CHECK (!s4.empty()); - BOOST_CHECK_EQUAL ( s4.size(), 2); - - ub::basic_fixed_rank_strides s5{ub::extents<3>{2,3,1}}; - BOOST_CHECK (!s5.empty()); - BOOST_CHECK_EQUAL ( s5.size(), 3); - - ub::basic_fixed_rank_strides s6{ub::extents<3>{1,2,3}}; - BOOST_CHECK (!s6.empty()); - BOOST_CHECK_EQUAL ( s6.size(), 3); - - ub::basic_fixed_rank_strides s7{ub::extents<3>{4,2,3}}; - BOOST_CHECK (!s7.empty()); - BOOST_CHECK_EQUAL ( s7.size(), 3); + namespace ublas = boost::numeric::ublas; +// using layout_type = value; +// constexpr auto layout = value{}; + + auto s0 = strides<0,value>{}; + auto s1 = strides<1,value>({1} ); + auto s3 = strides<1,value>({3} ); + auto s11 = strides<2,value>({1,1} ); + auto s12 = strides<2,value>({1,2} ); + auto s21 = strides<2,value>({2,1} ); + auto s23 = strides<2,value>({2,3} ); + auto s231 = strides<3,value>({2,3,1} ); + auto s123 = strides<3,value>({1,2,3} ); + auto s423 = 
strides<3,value>({4,2,3} ); + + BOOST_CHECK ( s0.empty()); + BOOST_CHECK (! s1.empty()); + BOOST_CHECK (! s3.empty()); + BOOST_CHECK (! s11.empty()); + BOOST_CHECK (! s12.empty()); + BOOST_CHECK (! s21.empty()); + BOOST_CHECK (! s23.empty()); + BOOST_CHECK (!s231.empty()); + BOOST_CHECK (!s123.empty()); + BOOST_CHECK (!s423.empty()); + + + BOOST_CHECK_EQUAL ( s0.size(), 0); + BOOST_CHECK_EQUAL ( s1.size(), 3); + BOOST_CHECK_EQUAL ( s3.size(), 1); + BOOST_CHECK_EQUAL ( s11.size(), 2); + BOOST_CHECK_EQUAL ( s12.size(), 2); + BOOST_CHECK_EQUAL ( s21.size(), 2); + BOOST_CHECK_EQUAL ( s23.size(), 2); + BOOST_CHECK_EQUAL ( s231.size(), 3); + BOOST_CHECK_EQUAL ( s123.size(), 3); + BOOST_CHECK_EQUAL ( s423.size(), 3); } BOOST_AUTO_TEST_CASE( test_fixed_rank_strides_ctor_access_first_order) { - namespace ub = boost::numeric::ublas; - - ub::basic_fixed_rank_strides s1{ub::extents<2>{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - ub::basic_fixed_rank_strides s2{ub::extents<2>{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - ub::basic_fixed_rank_strides s3{ub::extents<2>{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - ub::basic_fixed_rank_strides s4{ub::extents<2>{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 1); - BOOST_CHECK_EQUAL ( s4[1], 2); - - ub::basic_fixed_rank_strides s5{ub::extents<3>{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 1); - BOOST_CHECK_EQUAL ( s5[1], 2); - BOOST_CHECK_EQUAL ( s5[2], 6); - - ub::basic_fixed_rank_strides s6{ub::extents<3>{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 1); - BOOST_CHECK_EQUAL ( s6[1], 1); - BOOST_CHECK_EQUAL ( s6[2], 2); - - ub::basic_fixed_rank_strides s7{ub::extents<3>{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( 
s7[0], 1); - BOOST_CHECK_EQUAL ( s7[1], 2); - BOOST_CHECK_EQUAL ( s7[2], 2); - - ub::basic_fixed_rank_strides s8{ub::extents<3>{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 1); - BOOST_CHECK_EQUAL ( s8[1], 4); - BOOST_CHECK_EQUAL ( s8[2], 8); + using value = boost::numeric::ublas::layout::first_order; +// constexpr auto layout = boost::numeric::ublas::layout::first_order{}; + + auto s1 = strides<1,value>({1} ); + auto s3 = strides<1,value>({3} ); + auto s11 = strides<2,value>({1,1} ); + auto s12 = strides<2,value>({1,2} ); + auto s21 = strides<2,value>({2,1} ); + auto s23 = strides<2,value>({2,3} ); + auto s231 = strides<3,value>({2,3,1} ); + auto s213 = strides<3,value>({2,3,1} ); + auto s123 = strides<3,value>({1,2,3} ); + auto s423 = strides<3,value>({4,2,3} ); + + + BOOST_REQUIRE_EQUAL ( s1.size(),1); + BOOST_REQUIRE_EQUAL ( s3.size(),1); + BOOST_REQUIRE_EQUAL ( s11.size(),2); + BOOST_REQUIRE_EQUAL ( s12.size(),2); + BOOST_REQUIRE_EQUAL ( s21.size(),2); + BOOST_REQUIRE_EQUAL ( s23.size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s213.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); + + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); + + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); + + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); + + BOOST_CHECK_EQUAL ( s23[0], 1); + BOOST_CHECK_EQUAL ( s23[1], 2); + + BOOST_CHECK_EQUAL ( s231[0], 1); + BOOST_CHECK_EQUAL ( s231[1], 2); + BOOST_CHECK_EQUAL ( s231[2], 6); + + BOOST_CHECK_EQUAL ( s123[0], 1); + BOOST_CHECK_EQUAL ( s123[1], 1); + BOOST_CHECK_EQUAL ( s123[2], 2); + + BOOST_CHECK_EQUAL ( s213[0], 1); + BOOST_CHECK_EQUAL ( s213[1], 2); + BOOST_CHECK_EQUAL ( s213[2], 2); + + BOOST_CHECK_EQUAL ( s423[0], 1); + BOOST_CHECK_EQUAL ( s423[1], 4); + BOOST_CHECK_EQUAL ( s423[2], 8); } BOOST_AUTO_TEST_CASE( test_fixed_rank_strides_ctor_access_last_order) { - namespace ub 
= boost::numeric::ublas; - - ub::basic_fixed_rank_strides s1{ub::extents<2>{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - ub::basic_fixed_rank_strides s2{ub::extents<2>{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - ub::basic_fixed_rank_strides s3{ub::extents<2>{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - ub::basic_fixed_rank_strides s4{ub::extents<2>{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 3); - BOOST_CHECK_EQUAL ( s4[1], 1); - - ub::basic_fixed_rank_strides s5{ub::extents<3>{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 3); - BOOST_CHECK_EQUAL ( s5[1], 1); - BOOST_CHECK_EQUAL ( s5[2], 1); - - ub::basic_fixed_rank_strides s6{ub::extents<3>{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 6); - BOOST_CHECK_EQUAL ( s6[1], 3); - BOOST_CHECK_EQUAL ( s6[2], 1); - - ub::basic_fixed_rank_strides s7{ub::extents<3>{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 3); - BOOST_CHECK_EQUAL ( s7[1], 3); - BOOST_CHECK_EQUAL ( s7[2], 1); - - ub::basic_fixed_rank_strides s8{ub::extents<3>{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 6); - BOOST_CHECK_EQUAL ( s8[1], 3); - BOOST_CHECK_EQUAL ( s8[2], 1); + using value = boost::numeric::ublas::layout::first_order; + // constexpr auto layout = boost::numeric::ublas::layout::first_order{}; + + auto s1 = strides<1,value>({1} ); + auto s3 = strides<1,value>({3} ); + auto s11 = strides<2,value>({1,1} ); + auto s12 = strides<2,value>({1,2} ); + auto s21 = strides<2,value>({2,1} ); + auto s23 = strides<2,value>({2,3} ); + auto s231 = strides<3,value>({2,3,1} ); + auto s213 = strides<3,value>({2,3,1} ); + auto s123 = strides<3,value>({1,2,3} ); + auto s423 = strides<3,value>({4,2,3} ); + + 
BOOST_REQUIRE_EQUAL ( s1.size(),1); + BOOST_REQUIRE_EQUAL ( s3.size(),1); + BOOST_REQUIRE_EQUAL ( s11.size(),2); + BOOST_REQUIRE_EQUAL ( s12.size(),2); + BOOST_REQUIRE_EQUAL ( s21.size(),2); + BOOST_REQUIRE_EQUAL ( s23.size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s213.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); + + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); + + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); + + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); + + BOOST_CHECK_EQUAL ( s23[0], 3); + BOOST_CHECK_EQUAL ( s23[1], 1); + + BOOST_CHECK_EQUAL ( s231[0], 3); + BOOST_CHECK_EQUAL ( s231[1], 1); + BOOST_CHECK_EQUAL ( s231[2], 1); + + BOOST_CHECK_EQUAL ( s123[0], 6); + BOOST_CHECK_EQUAL ( s123[1], 3); + BOOST_CHECK_EQUAL ( s123[2], 1); + + BOOST_CHECK_EQUAL ( s213[0], 3); + BOOST_CHECK_EQUAL ( s213[1], 3); + BOOST_CHECK_EQUAL ( s213[2], 1); + + BOOST_CHECK_EQUAL ( s423[0], 6); + BOOST_CHECK_EQUAL ( s423[1], 3); + BOOST_CHECK_EQUAL ( s423[2], 1); + } BOOST_AUTO_TEST_SUITE_END() + +#endif diff --git a/test/tensor/test_fixed_rank_tensor.cpp b/test/tensor/test_fixed_rank_tensor.cpp index 918020ddf..b7050c897 100644 --- a/test/tensor/test_fixed_rank_tensor.cpp +++ b/test/tensor/test_fixed_rank_tensor.cpp @@ -18,358 +18,356 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_tensor ) +BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank ) using test_types = zip>::with_t; BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_ctor, value, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto a1 = ublas::fixed_rank_tensor{}; - BOOST_CHECK_EQUAL( a1.size() , 0ul ); - BOOST_CHECK( a1.empty() ); - BOOST_CHECK_EQUAL( a1.data() , nullptr); - - auto a2 = ublas::fixed_rank_tensor{1,1}; - BOOST_CHECK_EQUAL( a2.size() , 1 ); - 
BOOST_CHECK( !a2.empty() ); - BOOST_CHECK_NE( a2.data() , nullptr); - - auto a3 = ublas::fixed_rank_tensor{2,1}; - BOOST_CHECK_EQUAL( a3.size() , 2 ); - BOOST_CHECK( !a3.empty() ); - BOOST_CHECK_NE( a3.data() , nullptr); - - auto a4 = ublas::fixed_rank_tensor{1,2}; - BOOST_CHECK_EQUAL( a4.size() , 2 ); - BOOST_CHECK( !a4.empty() ); - BOOST_CHECK_NE( a4.data() , nullptr); - - auto a5 = ublas::fixed_rank_tensor{2,1}; - BOOST_CHECK_EQUAL( a5.size() , 2 ); - BOOST_CHECK( !a5.empty() ); - BOOST_CHECK_NE( a5.data() , nullptr); - - auto a6 = ublas::fixed_rank_tensor{4,3,2}; - BOOST_CHECK_EQUAL( a6.size() , 4*3*2 ); - BOOST_CHECK( !a6.empty() ); - BOOST_CHECK_NE( a6.data() , nullptr); - - auto a7 = ublas::fixed_rank_tensor{4,1,2}; - BOOST_CHECK_EQUAL( a7.size() , 4*1*2 ); - BOOST_CHECK( !a7.empty() ); - BOOST_CHECK_NE( a7.data() , nullptr); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto a2 = ublas::tensor_static_rank{1,1}; + BOOST_CHECK_EQUAL( a2.size() , 1 ); + BOOST_CHECK( !a2.empty() ); + BOOST_CHECK_NE( a2.data() , nullptr); + + auto a3 = ublas::tensor_static_rank{2,1}; + BOOST_CHECK_EQUAL( a3.size() , 2 ); + BOOST_CHECK( !a3.empty() ); + BOOST_CHECK_NE( a3.data() , nullptr); + + auto a4 = ublas::tensor_static_rank{1,2}; + BOOST_CHECK_EQUAL( a4.size() , 2 ); + BOOST_CHECK( !a4.empty() ); + BOOST_CHECK_NE( a4.data() , nullptr); + + auto a5 = ublas::tensor_static_rank{2,1}; + BOOST_CHECK_EQUAL( a5.size() , 2 ); + BOOST_CHECK( !a5.empty() ); + BOOST_CHECK_NE( a5.data() , nullptr); + + auto a6 = ublas::tensor_static_rank{4,3,2}; + BOOST_CHECK_EQUAL( a6.size() , 4*3*2 ); + BOOST_CHECK( !a6.empty() ); + BOOST_CHECK_NE( a6.data() , nullptr); + + auto a7 = ublas::tensor_static_rank{4,1,2}; + BOOST_CHECK_EQUAL( a7.size() , 4*1*2 ); + BOOST_CHECK( !a7.empty() ); + BOOST_CHECK_NE( a7.data() , nullptr); } struct fixture { - template - using extents_type = 
boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<3>, // 3 - extents_type<3>, // 4 - extents_type<4> // 5 + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 > extents = { - extents_type<2>{1,1}, - extents_type<2>{2,3}, - extents_type<3>{4,1,3}, - extents_type<3>{4,2,3}, - extents_type<4>{4,2,3,5} - }; + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - auto t = ublas::fixed_rank_tensor{e}; - - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + auto t = ublas::tensor_static_rank{e}; + + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + if(ublas::empty(e)) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename 
value::first_type; - using layout_type = typename value::second_type; - - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - auto r = ublas::fixed_rank_tensor{e}; - - auto t = r; - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.strides() == r.strides() ); - BOOST_CHECK ( t.extents() == r.extents() ); - - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - } - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r[i] ); - - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + auto r = ublas::tensor_static_rank{e}; + + auto t = r; + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + // BOOST_CHECK ( t.strides() == r.strides() ); + BOOST_CHECK ( t.extents() == r.extents() ); + + if(ublas::empty(e)) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r[i] ); + + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using other_layout_type = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using other_layout_t = std::conditional_t::value, ublas::layout::last_order, 
ublas::layout::first_order>; - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto r = tensor_type{e}; - ublas::fixed_rank_tensor t = r; - tensor_type q = t; + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using other_tensor_t = ublas::tensor_static_rank; + auto r = tensor_t(e); + other_tensor_t t = r; + tensor_t q = t; - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.extents() == r.extents() ); + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + BOOST_CHECK ( t.extents() == r.extents() ); - BOOST_CHECK_EQUAL ( q.size() , r.size() ); - BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); - BOOST_CHECK ( q.strides() == r.strides() ); - BOOST_CHECK ( q.extents() == r.extents() ); + BOOST_CHECK_EQUAL ( q.size() , r.size() ); + BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); + // BOOST_CHECK ( q.strides() == r.strides() ); + BOOST_CHECK ( q.extents() == r.extents() ); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( q[i], r[i] ); + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( q[i], r[i] ); - }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const&, auto& e) - { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto r = tensor_type{e}; - auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); 
- } - - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto r = tensor_t{e}; + auto t = std::move(r); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + + if(ublas::empty(e)) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - std::random_device device{}; - std::minstd_rand0 generator(device()); + std::random_device device{}; + std::minstd_rand0 generator(device()); - using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; - auto distribution = distribution_type(1,6); + using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; + auto distribution = distribution_type(1,6); - for_each_tuple(extents, [&](auto const&, auto const& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; - auto r = value_type( static_cast< inner_type_t >(distribution(generator)) ); - auto t = tensor_type{e,r}; - for(auto i = 0ul; i < 
t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r ); + auto r = value_t( static_cast< inner_type_t >(distribution(generator)) ); + auto t = tensor_t(e); + t = r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r ); - }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - using array_type = typename tensor_type::array_type; - - auto a = array_type(product(e)); - auto v = value_type {}; - - for(auto& aa : a){ - aa = v; - v += value_type{1}; - } - auto t = tensor_type{e, a}; - v = value_type{}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) - BOOST_CHECK_EQUAL( t[i], v); + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using container_t = typename tensor_t::container_type; - }); + auto a = container_t(product(e)); + auto v = value_t {}; + + for(auto& aa : a){ + aa = v; + v += value_t{1}; + } + auto t = tensor_t(e, a); + v = value_t{}; + + for(auto i = 0ul; i < t.size(); ++i, v+=value_t{1}) + BOOST_CHECK_EQUAL( t[i], v); + + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents, [](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - - auto t = tensor_type{e}; - auto v = value_type {}; - for(auto i = 0ul; i < 
t.size(); ++i, v+=value_type{1}){ - t[i] = v; - BOOST_CHECK_EQUAL( t[i], v ); - - t(i) = v; - BOOST_CHECK_EQUAL( t(i), v ); - } + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - }); + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + + auto t = tensor_t{e}; + auto v = value_t {}; + for(auto i = 0ul; i < t.size(); ++i, v+=value_t{1}){ + t[i] = v; + BOOST_CHECK_EQUAL( t[i], v ); + + t(i) = v; + BOOST_CHECK_EQUAL( t(i), v ); + } + + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check1 = [](const auto& t) - { - auto v = value_type{}; - for(auto k = 0ul; k < t.size(); ++k){ - BOOST_CHECK_EQUAL(t[k], v); - v+=value_type{1}; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check1 = [](const auto& t) + { + auto v = value_t{}; + for(auto k = 0ul; k < t.size(); ++k){ + BOOST_CHECK_EQUAL(t[k], v); + v+=value_t{1}; + } + }; + + auto check2 = [](const auto& t) + { + std::array k = {0,0}; + auto r = std::is_same::value ? 1 : 0; + auto q = std::is_same::value ? 1 : 0; + auto v = value_t{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); + v+=value_t{1}; + } + } + }; + + auto check3 = [](const auto& t) + { + std::array k = {0,0,0}; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 
2 : 0; + auto o = op_type{}; + auto v = value_t{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); + v+=value_t{1}; } - }; - - auto check2 = [](const auto& t) - { - std::array k; - auto r = std::is_same::value ? 1 : 0; - auto q = std::is_same::value ? 1 : 0; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); - v+=value_type{1}; - } + } + } + }; + + auto check4 = [](const auto& t) + { + static constexpr auto order = 4; + std::array k = {0,0,0,0}; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? (order-1) : 0; + auto o = op_type{}; + auto v = value_t{}; + for(k[o(r,0)] = 0ul; k[o(r,0)] < t.size(o(r,0)); ++k[o(r,0)]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); + v+=value_t{1}; + } } - }; - - auto check3 = [](const auto& t) - { - std::array k; - using op_type = std::conditional_t, std::minus<>, std::plus<>>; - auto r = std::is_same_v ? 2 : 0; - auto o = op_type{}; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ - for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); - v+=value_type{1}; - } - } - } - }; - - auto check4 = [](const auto& t) - { - std::array k; - using op_type = std::conditional_t, std::minus<>, std::plus<>>; - auto r = std::is_same_v ? 
3 : 0; - auto o = op_type{}; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ - for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ - for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); - v+=value_type{1}; - } - } - } - } - }; - - auto check = [check1,check2,check3,check4](auto const&, auto const& e) { - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - auto t = tensor_type{e}; - auto v = value_type {}; - for(auto i = 0ul; i < t.size(); ++i){ - t[i] = v; - v+=value_type{1}; - } - - if constexpr(extents_type::_size == 1) check1(t); - else if constexpr(extents_type::_size == 2) check2(t); - else if constexpr(extents_type::_size == 3) check3(t); - else if constexpr(extents_type::_size == 4) check4(t); - - }; - - for_each_tuple(extents,check); + } + } + }; + + auto check = [check1,check2,check3,check4](auto const& /*unused*/, auto const& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t(e); + auto v = value_t {}; + for(auto i = 0ul; i < t.size(); ++i){ + t[i] = v; + v+=value_t{1}; + } + + if constexpr(size == 1) check1(t); + else if constexpr(size == 2) check2(t); + else if constexpr(size == 3) check3(t); + else if constexpr(size == 4) check4(t); + + }; + + for_each_in_tuple(extents,check); } @@ -377,41 +375,42 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - for_each_tuple(extents,[&](auto const&, auto& efrom){ - - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - - for_each_tuple(extents,[&](auto const&, auto& eto){ - using 
extents_type = std::decay_t; - using to_extents_type = std::decay_t; - if constexpr( extents_type::_size == to_extents_type::_size ){ - - auto v = value_type {}; - v+=value_type{1}; - auto t = tensor_type{efrom, v}; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], v ); - - t.reshape(eto); - for(auto i = 0ul; i < std::min(product(efrom),product(eto)); ++i) - BOOST_CHECK_EQUAL( t[i], v ); - - BOOST_CHECK_EQUAL ( t.size() , product(eto) ); - BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); - BOOST_CHECK ( t.extents() == eto ); - - if(efrom != eto){ - for(auto i = product(efrom); i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], value_type{} ); - } - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto const& efrom){ + + using efrom_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto& eto){ + using eto_t = std::decay_t; + + if constexpr( std::tuple_size_v == std::tuple_size_v ){ + + auto v = value_t {}; + v+=value_t{1}; + auto t = tensor_t(efrom); + t = v; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], v ); + + auto t2 = reshape(t,eto); + for(auto i = 0ul; i < std::min(ublas::product(efrom),ublas::product(eto)); ++i) + BOOST_CHECK_EQUAL( t2[i], v ); + + BOOST_CHECK_EQUAL ( t2.size() , ublas::product(eto) ); + BOOST_CHECK_EQUAL ( t2.rank() , ublas::size (eto) ); + BOOST_CHECK ( t2.extents() == eto ); + + if(efrom != eto){ + for(auto i = product(efrom); i < t.size(); ++i) + BOOST_CHECK_EQUAL( t2[i], value_t{} ); + } + } }); + }); } @@ -419,88 +418,92 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using 
layout_type = typename value::second_type; - - for_each_tuple(extents,[&](auto const&, auto& e_t){ - - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; - - for_each_tuple(extents,[&](auto const&, auto& e_r){ - using extents_type = std::decay_t; - using r_extents_type = std::decay_t; - if constexpr( extents_type::_size == r_extents_type::_size ){ - - auto v = value_type {} + value_type{1}; - auto w = value_type {} + value_type{2}; - auto t = tensor_type{e_t, v}; - auto r = tensor_type{e_r, w}; - - std::swap( r, t ); - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], w ); - - BOOST_CHECK_EQUAL ( t.size() , product(e_r) ); - BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); - BOOST_CHECK ( t.extents() == e_r ); - - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL( r[i], v ); - - BOOST_CHECK_EQUAL ( r.size() , product(e_t) ); - BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); - BOOST_CHECK ( r.extents() == e_t ); - - } - }); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto const& e_t){ + using e_tt = std::decay_t< decltype(e_t) >; + using tensor_t = ublas::tensor_static_rank, layout_t>; + + for_each_in_tuple(extents,[&](auto const& /*unused*/, auto& e_r){ + + using e_rt = std::decay_t< decltype(e_r) >; + + if constexpr( std::tuple_size_v == std::tuple_size_v ){ + + auto v = value_t {} + value_t{1}; + auto w = value_t {} + value_t{2}; + auto t = tensor_t(e_t); + auto r = tensor_t(e_r); + + t = v; + r = w; + + std::swap( r, t ); + + BOOST_CHECK ( std::all_of(t.begin(),t.end(),[w](auto tt){return tt == w; } ) ) ; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e_r) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e_r) ); + BOOST_CHECK ( t.extents() == e_r ); + BOOST_CHECK ( t.strides() == ublas::to_strides(e_r,layout_t{}) ); + + BOOST_CHECK ( 
std::all_of(r.begin(),r.end(),[v](auto tt){return tt == v; } ) ) ; + BOOST_CHECK_EQUAL ( r.size() , ublas::product(e_t) ); + BOOST_CHECK_EQUAL ( r.rank() , ublas::size (e_t) ); + BOOST_CHECK ( r.extents() == e_t ); + BOOST_CHECK ( r.strides() == ublas::to_strides(e_t,layout_t{}) ); + + + } }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; - for_each_tuple(extents,[](auto const&, auto& e){ - using extents_type = std::decay_t; - using tensor_type = ublas::fixed_rank_tensor; + for_each_in_tuple(extents,[](auto const& /*unused*/, auto const& e){ + using et = std::decay_t< decltype(e) >; + using tensor_t = ublas::tensor_static_rank, layout_t>; - auto v = value_type {} + value_type{1}; - auto t = tensor_type{e, v}; + auto v = value_t {} + value_t{1}; + auto t = tensor_t(e); + t = v; - BOOST_CHECK_EQUAL( std::distance(t.begin(), t.end ()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.begin(), t.end ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - if(t.size() > 0) { - BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; - BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; - } - }); + if(!t.empty()) { + BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; + BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; + } + }); } 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::fixed_rank_tensor; - - std::vector vec(30); - BOOST_CHECK_THROW(tensor_type({5,5},vec), std::runtime_error); - - auto t = tensor_type{{5,5}}; - auto i = ublas::index::index_type<4>{}; - BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_static_rank; + + auto vec = std::vector(1); + BOOST_CHECK_THROW(tensor_t({5,5},vec), std::length_error); + + // Does not throw but results in a static assertion +// auto t = tensor_t{{5,5}}; +// auto i = ublas::index::index_type<4>{}; +// BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); } diff --git a/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp b/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp index e6cba14e9..5ed7a4f56 100644 --- a/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp +++ b/test/tensor/test_fixed_rank_tensor_matrix_vector.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -19,35 +19,35 @@ #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_fixed_rank_tensor_matrix_interoperability ) ; +BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank_matrix_interoperability ) using test_types = zip::with_t; BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + using layout = typename value::second_type; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; - ublas::fixed_rank_tensor a2 = matrix_type(1,1); + auto a2 = tensor( matrix(1,1) ); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - ublas::fixed_rank_tensor a3 = matrix_type(2,1); + auto a3 = tensor( matrix(2,1) ); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - ublas::fixed_rank_tensor a4 = matrix_type(1,2); + auto a4 = tensor( matrix(1,2) ); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - ublas::fixed_rank_tensor a5 = matrix_type(2,3); + auto a5 = tensor( matrix(2,3) ); BOOST_CHECK_EQUAL( a5.size() , 6 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); @@ -56,28 +56,28 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_static_rank; using vector_type = 
typename tensor_type::vector_type; - ublas::fixed_rank_tensor a2 = vector_type(1); + auto a2 = tensor_type( vector_type(1) ); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - ublas::fixed_rank_tensor a3 = vector_type(2); + auto a3 = tensor_type( vector_type(2) ); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - ublas::fixed_rank_tensor a4 = vector_type(2); + auto a4 = tensor_type( vector_type(2) ); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - ublas::fixed_rank_tensor a5 = vector_type(3); + auto a5 = tensor_type( vector_type(3) ); BOOST_CHECK_EQUAL( a5.size() , 3 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); @@ -93,12 +93,14 @@ struct fixture extents_type<2>, // 0 extents_type<2>, // 1 extents_type<2>, // 2 - extents_type<2> // 3 - > extents = { - {1,2}, - {2,1}, - {9,7}, - {12,12}, + extents_type<2>, // 3 + extents_type<2> // 4 + > extents = { + {1,1}, + {1,2}, + {2,1}, + {6,6}, + {9,7}, }; }; @@ -107,159 +109,158 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size()==2); - etensor_type t = matrix_type{e[0],e[1]}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename 
value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + assert(ublas::size(e)==2); + tensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - assert(e.size()==2); - if(e.empty()) - return; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using vector = typename tensor::vector_type; - etensor_type t = vector_type(product(e)); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - }; + assert(ublas::size(e)==2); + if(ublas::empty(e)) + return; + + tensor t = vector (product(e)); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) 
{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r; - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), r(i,j) ); - } - } - }; - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { + using etype = std::decay_t; + constexpr auto size = std::tuple_size_v; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor{e[1],e[0]}; + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r; + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), r(i,j) ); + } + } + }); + + //for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_tensor_vector_copy_assignment, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r; - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t[i], r(i) ); - } - }; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; - for_each_tuple(extents,check); + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r; + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t[i], r(i) ); + } + }; + + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) { - using 
namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - auto q = r; - t = std::move(r); - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), q(i,j) ); - } - } - }; - for_each_tuple(extents,check); + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using matrix_type = typename tensor_type::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = matrix_type(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + auto q = r; + t = std::move(r); + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), q(i,j) ); + } + } + }; + + for_each_in_tuple(extents,check); } @@ -267,36 +268,35 @@ 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, te BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - auto q = r; - t = std::move(r); - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t[i], q(i) ); - } - }; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; - for_each_tuple(extents,check); + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + auto q = r; + t = std::move(r); + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t[i], q(i) 
); + } + }; + + for_each_in_tuple(extents,check); } @@ -305,57 +305,57 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, te BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r - - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); - BOOST_CHECK ( !s.empty() ); - BOOST_CHECK_NE ( s.data() , nullptr); - - BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); - BOOST_CHECK ( !q.empty() ); - BOOST_CHECK_NE ( q.data() , nullptr); - - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), 4*r(i,j) ); - BOOST_CHECK_EQUAL( s.at(i,j), t.at(i,j) ); - BOOST_CHECK_EQUAL( 
q.at(i,j), 3*s.at(i,j) ); - } - } - }; - for_each_tuple(extents,check); + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using matrix_type = typename tensor_type::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = matrix_type(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r + 3*r; + tensor_type s = r + 3*r; + tensor_type q = s + r + 3*r + s; // + 3*r + + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); + BOOST_CHECK ( !s.empty() ); + BOOST_CHECK_NE ( s.data() , nullptr); + + BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); + BOOST_CHECK ( !q.empty() ); + BOOST_CHECK_NE ( q.data() , nullptr); + + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), 4*r(i,j) ); + BOOST_CHECK_EQUAL( s.at(i,j), t.at(i,j) ); + BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); + } + } + }; + + for_each_in_tuple(extents,check); } @@ -365,106 +365,107 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = 
ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r - - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); - BOOST_CHECK ( !s.empty() ); - BOOST_CHECK_NE ( s.data() , nullptr); - - BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); - BOOST_CHECK ( !q.empty() ); - BOOST_CHECK_NE ( q.data() , nullptr); - - - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t.at(i), 4*r(i) ); - BOOST_CHECK_EQUAL( s.at(i), t.at(i) ); - BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); - } - }; + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; - for_each_tuple(extents,check); + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r + 3*r; + tensor_type s = r + 3*r; 
+ tensor_type q = s + r + 3*r + s; // + 3*r + + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); + BOOST_CHECK ( !s.empty() ); + BOOST_CHECK_NE ( s.data() , nullptr); + + BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); + BOOST_CHECK ( !q.empty() ); + BOOST_CHECK_NE ( q.data() , nullptr); + + + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t.at(i), 4*r(i) ); + BOOST_CHECK_EQUAL( s.at(i), t.at(i) ); + BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); + } + }; + + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::fixed_rank_tensor; - - if(product(e) <= 2) - return; - assert(e.size() == 2); - auto Q = etensor_type{e[0],1}; - auto A = matrix_type(e[0],e[1]); - auto b = vector_type(e[1]); - auto c = vector_type(e[0]); - std::iota(b.data().begin(),b.data().end(), 1); - 
std::fill(A.data().begin(),A.data().end(), 1); - std::fill(c.data().begin(),c.data().end(), 2); - std::fill(Q.begin(),Q.end(), 2); - - etensor_type T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; - - BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); - BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); - BOOST_CHECK_EQUAL ( T.size() , Q.size() ); - BOOST_CHECK_EQUAL ( T.size() , c.size() ); - BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); - BOOST_CHECK ( !T.empty() ); - BOOST_CHECK_NE ( T.data() , nullptr); - - for(auto i = 0ul; i < T.size(); ++i){ - auto n = e[1]; - auto ab = n * (n+1) / 2; - BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - } - - }; - - - - for_each_tuple(extents,check); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + using vector = typename tensor::vector_type; + + if(product(e) <= 2) + return; + assert(ublas::size(e) == 2); + auto Q = tensor{e[0],1}; + auto A = matrix(e[0],e[1]); + auto b = vector(e[1]); + auto c = vector(e[0]); + std::iota(b.data().begin(),b.data().end(), 1); + std::fill(A.data().begin(),A.data().end(), 1); + std::fill(c.data().begin(),c.data().end(), 2); + std::fill(Q.begin(),Q.end(), 2); + + tensor T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; + + BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); + BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); + BOOST_CHECK_EQUAL ( T.size() , Q.size() ); + BOOST_CHECK_EQUAL ( T.size() , c.size() ); + BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); + BOOST_CHECK ( !T.empty() ); + BOOST_CHECK_NE ( T.data() , nullptr); + + const auto n = e[1]; + const auto ab = value(std::div(n*(n+1),2).quot); + const auto ref = ab+4*Q(0)+2*c(0); + BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref 
== cc; }) ); + +// for(auto i = 0ul; i < T.size(); ++i){ +// auto n = e[1]; +// auto ab = n * (n+1) / 2; +// BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); +// } + + }; + for_each_in_tuple(extents,check); } diff --git a/test/tensor/test_functions.cpp b/test/tensor/test_functions.cpp index e1d32bef0..c5a55048c 100644 --- a/test/tensor/test_functions.cpp +++ b/test/tensor/test_functions.cpp @@ -22,7 +22,6 @@ #include "utility.hpp" -// BOOST_AUTO_TEST_SUITE ( test_tensor_functions, * boost::unit_test::depends_on("test_tensor_contraction") ) BOOST_AUTO_TEST_SUITE ( test_tensor_functions) @@ -52,10 +51,10 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; using vector_type = typename tensor_type::vector_type; @@ -63,7 +62,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, f auto a = tensor_type(n, value_type{2}); - for(auto m = 0u; m < n.size(); ++m){ + for(auto m = 0u; m < ublas::size(n); ++m){ auto b = vector_type (n[m], value_type{1} ); @@ -78,30 +77,28 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, f auto a = tensor_type(n, value_type{2}); auto b = vector_type(n[0], value_type{1}); - auto zero_rank_empty_tensor = tensor_type{}; auto empty = vector_type{}; BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); - BOOST_CHECK_THROW(prod(zero_rank_empty_tensor, b, 1), std::length_error); BOOST_CHECK_THROW(prod(a, empty, 2), std::length_error); } BOOST_AUTO_TEST_CASE( test_tensor_prod_vector_exception ) { - using namespace boost::numeric; - using value_type = float; - using layout_type = ublas::layout::first_order; - using d_tensor_type = 
ublas::dynamic_tensor; - using vector_type = typename d_tensor_type::vector_type; - - auto t1 = d_tensor_type{ublas::extents<>{},1.f}; - auto v1 = vector_type{3,value_type{1}}; - - BOOST_REQUIRE_THROW(prod(t1,v1,0),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,v1,1),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,v1,3),std::length_error); +// namespace ublas = boost::numeric::ublas; +// using value_type = float; +// using layout_type = ublas::layout::first_order; +// using d_tensor_type = ublas::tensor_dynamic; +// using vector_type = typename d_tensor_type::vector_type; + +// auto t1 = d_tensor_type{ublas::extents<>{},1.f}; +// auto v1 = vector_type{3,value_type{1}}; + +// BOOST_REQUIRE_THROW(prod(t1,v1,0),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,v1,1),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,v1,3),std::length_error); } @@ -109,10 +106,10 @@ BOOST_AUTO_TEST_CASE( test_tensor_prod_vector_exception ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; using matrix_type = typename tensor_type::matrix_type; @@ -120,7 +117,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, f auto a = tensor_type(n, value_type{2}); - for(auto m = 0u; m < n.size(); ++m){ + for(auto m = 0u; m < ublas::size(n); ++m){ auto b = matrix_type ( n[m], n[m], value_type{1} ); @@ -136,39 +133,37 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, f auto a = tensor_type(n, value_type{2}); auto b = matrix_type(n[0], n[0], value_type{1}); - auto zero_rank_empty_tensor = tensor_type{}; auto empty = matrix_type{}; BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); - 
BOOST_CHECK_THROW(prod(zero_rank_empty_tensor, b, 1), std::length_error); - BOOST_CHECK_THROW(prod(a, empty, 2), std::length_error); + BOOST_CHECK_THROW(prod(a, empty, 2), std::invalid_argument); } BOOST_AUTO_TEST_CASE( test_tensor_prod_matrix_exception ) { - using namespace boost::numeric; - using value_type = float; - using layout_type = ublas::layout::first_order; - using d_extents_type = ublas::extents<>; - using d_tensor_type = ublas::dynamic_tensor; - using matrix_type = typename d_tensor_type::matrix_type; +// namespace ublas = boost::numeric::ublas; +// using value_type = float; +// using layout_type = ublas::layout::first_order; +// using d_extents_type = ublas::extents<>; +// using d_tensor_type = ublas::tensor_dynamic; +// using matrix_type = typename d_tensor_type::matrix_type; - auto t1 = d_tensor_type{d_extents_type{},1.f}; - auto m1 = matrix_type{3,3,value_type{1}}; +// auto t1 = d_tensor_type{d_extents_type{},1.f}; +// auto m1 = matrix_type{3,3,value_type{1}}; - BOOST_REQUIRE_THROW(prod(t1,m1,0),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,m1,1),std::length_error); - BOOST_REQUIRE_THROW(prod(t1,m1,3),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,m1,0),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,m1,1),std::length_error); +// BOOST_REQUIRE_THROW(prod(t1,m1,3),std::length_error); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_1, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; // left-hand and right-hand side have the // the same number of elements @@ -202,13 +197,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_1, value, test_types, BOOST_AUTO_TEST_CASE( test_tensor_prod_tensor_1_exception ) { - using namespace boost::numeric; + namespace ublas = 
boost::numeric::ublas; using value_type = float; using layout_type = ublas::layout::first_order; using d_extents_type = ublas::extents<>; - using d_tensor_type = ublas::dynamic_tensor; + using d_tensor_type = ublas::tensor_dynamic; + - auto t1 = d_tensor_type{}; std::vector phia = {1,2,3}; std::vector phib = {1,2,3,4,5}; @@ -236,10 +231,11 @@ BOOST_AUTO_TEST_CASE( test_tensor_prod_tensor_1_exception ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using extents_type = typename tensor_type::extents_type; auto compute_factorial = [](auto const& p){ @@ -250,11 +246,11 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, }; auto permute_extents = [](auto const& pi, auto const& na){ - auto nb = na; - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - nb[pi[j]-1] = na[j]; - return nb; + auto nb_base = na.base(); + assert(pi.size() == ublas::size(na)); + for(auto j = 0u; j < pi.size(); ++j) + nb_base[pi[j]-1] = na[j]; + return extents_type(nb_base); }; @@ -304,22 +300,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, auto phia = std::vector(3); auto sphia = std::vector(2); - BOOST_CHECK_THROW(ublas::prod(tensor_type{}, tensor_type({2,1,2}), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2,3}), tensor_type(), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2,4}), tensor_type({2,1}), phia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({2,1,2}), phia, phia), std::runtime_error); - 
BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({2,1,3}), sphia, phia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({2,2}), phia, sphia), std::runtime_error); - BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2}), tensor_type({4,4}), sphia, phia), std::runtime_error); +// BOOST_CHECK_THROW(ublas::prod(tensor_type{}, tensor_type({2,1,2}), phia, phia), std::runtime_error); +// BOOST_CHECK_THROW(ublas::prod(tensor_type({1,2,3}), tensor_type(), phia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2,4}, tensor_type{2,1}, phia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{2,1,2}, phia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{2,1,3}, sphia, phia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{2,2}, phia, sphia), std::runtime_error); + BOOST_CHECK_THROW(ublas::prod(tensor_type{1,2}, tensor_type{4,4}, sphia, phia), std::runtime_error); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_inner_prod, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& n : extents) { @@ -333,19 +329,19 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_inner_prod, value, test_types, fi BOOST_CHECK_EQUAL( c , r ); } - BOOST_CHECK_THROW(ublas::inner_prod(tensor_type({1,2,3}), tensor_type({1,2,3,4})), std::length_error); // rank different - BOOST_CHECK_THROW(ublas::inner_prod(tensor_type(), tensor_type()), std::length_error); //empty tensor - BOOST_CHECK_THROW(ublas::inner_prod(tensor_type({1,2,3}), tensor_type({3,2,1})), std::length_error); // different extent + BOOST_CHECK_THROW(ublas::inner_prod(tensor_type{1,2,3}, tensor_type{1,2,3,4}), 
std::length_error); // rank different +// BOOST_CHECK_THROW(ublas::inner_prod(tensor_type(), tensor_type()), std::length_error); //empty tensor + BOOST_CHECK_THROW(ublas::inner_prod(tensor_type{1,2,3}, tensor_type{3,2,1}), std::length_error); // different extent } BOOST_AUTO_TEST_CASE( test_tensor_inner_prod_exception ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = float; using layout_type = ublas::layout::first_order; using d_extents_type = ublas::extents<>; - using d_tensor_type = ublas::dynamic_tensor; + using d_tensor_type = ublas::tensor_dynamic; auto t1 = d_tensor_type{d_extents_type{1,2},1.f}; auto t2 = d_tensor_type{d_extents_type{1,2,3},1.f}; @@ -354,10 +350,10 @@ BOOST_AUTO_TEST_CASE( test_tensor_inner_prod_exception ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_norm, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& n : extents) { @@ -373,10 +369,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_norm, value, test_types, fixture auto c = ublas::inner_prod(a, a); auto r = std::inner_product(a.begin(),a.end(), a.begin(),value_type(0)); - tensor_type var = (a+a)/2.0f; // std::complex/int not allowed as expression is captured + tensor_type var = (a+a)/value_type(2); // std::complex/int not allowed as expression is captured auto r2 = ublas::norm( var ); - BOOST_CHECK_THROW(ublas::norm(tensor_type{}), std::runtime_error); +// BOOST_CHECK_THROW(ublas::norm(tensor_type{}), std::runtime_error); BOOST_CHECK_EQUAL( c , r ); BOOST_CHECK_EQUAL( std::sqrt( c ) , r2 ); @@ -386,13 +382,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_norm, value, test_types, fixture BOOST_FIXTURE_TEST_CASE( test_tensor_real_imag_conj, fixture ) { - using namespace 
boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = float; using complex_type = std::complex; using layout_type = ublas::layout::first_order; - using tensor_complex_type = ublas::dynamic_tensor; - using tensor_type = ublas::dynamic_tensor; + using tensor_complex_type = ublas::tensor_dynamic; + using tensor_type = ublas::tensor_dynamic; for(auto const& n : extents) { @@ -460,10 +456,10 @@ BOOST_FIXTURE_TEST_CASE( test_tensor_real_imag_conj, fixture ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_outer_prod, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& n1 : extents) { auto a = tensor_type(n1, value_type(2)); @@ -500,10 +496,10 @@ void init(std::vector>& a) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_trans, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto fak = [](auto const& p){ auto f = 1ul; @@ -521,8 +517,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_trans, value, test_types, fixture for(auto const& n : extents) { - auto const p = n.size(); - auto const s = product(n); + auto const p = ublas::size(n); + auto const s = ublas::product(n); auto aref = tensor_type(n); auto v = value_type{}; for(auto i = 0u; i < s; ++i, v+=1) diff --git a/test/tensor/test_multi_index.cpp b/test/tensor/test_multi_index.cpp index 63289e52f..fdbbac7de 100644 --- a/test/tensor/test_multi_index.cpp +++ b/test/tensor/test_multi_index.cpp @@ -29,46 +29,46 @@ using test_types = zip>::with_t ind(_a, _b); + ublas::multi_index<2> ind(i::_a, i::_b); 
BOOST_CHECK_EQUAL ( get<0>( ind ), 1 ) ; BOOST_CHECK_EQUAL ( get<1>( ind ), 2 ) ; @@ -76,7 +76,7 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_class_construction ) { - multi_index<2> ind(_d,_c); + ublas::multi_index<2> ind(i::_d,i::_c); BOOST_CHECK_EQUAL ( ind[0] , 4 ) ; BOOST_CHECK_EQUAL ( ind[1] , 3 ) ; @@ -86,58 +86,59 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_class_construction ) BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_multi_index_class_generation, value, test_types ) { - using namespace boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto t = std::make_tuple ( - index::_a, // 0 - index::_b, // 1 - index::_c, // 2 - index::_d, // 3 - index::_e // 4 + i::_a, // 0 + i::_b, // 1 + i::_c, // 2 + i::_d, // 3 + i::_e // 4 ); { - auto a = tensor_type(extents<>{2,3}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3}, value_type{2}); auto a_ind = a( std::get<0>(t), std::get<2>(t) ); BOOST_CHECK_EQUAL ( std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_a() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_c() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_a() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_c() ) ; } { - auto a = tensor_type(extents<>{2,3}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3}, value_type{2}); auto a_ind = a( std::get<2>(t), std::get<0>(t) ); BOOST_CHECK_EQUAL ( std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_c() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_a() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_c() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_a() ) ; } { - auto a = 
tensor_type(extents<>{2,3}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3}, value_type{2}); auto a_ind = a( std::get<2>(t), std::get<3>(t) ); BOOST_CHECK_EQUAL (std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_c() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_d() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_c() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_d() ) ; } { - auto a = tensor_type(extents<>{2,3,4}, value_type{2}); + auto a = tensor_type(ublas::extents<>{2,3,4}, value_type{2}); auto a_ind = a( std::get<2>(t), std::get<3>(t), std::get<0>(t) ); BOOST_CHECK_EQUAL (std::addressof( a_ind.first ), std::addressof( a ) ) ; - BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), index::_c() ) ; - BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), index::_d() ) ; - BOOST_CHECK_EQUAL (std::get<2>(a_ind.second)(), index::_a() ) ; + BOOST_CHECK_EQUAL (std::get<0>(a_ind.second)(), i::_c() ) ; + BOOST_CHECK_EQUAL (std::get<1>(a_ind.second)(), i::_d() ) ; + BOOST_CHECK_EQUAL (std::get<2>(a_ind.second)(), i::_a() ) ; } } diff --git a/test/tensor/test_multi_index_utility.cpp b/test/tensor/test_multi_index_utility.cpp index a4ed17e1c..3cd35de9a 100644 --- a/test/tensor/test_multi_index_utility.cpp +++ b/test/tensor/test_multi_index_utility.cpp @@ -19,40 +19,40 @@ BOOST_AUTO_TEST_SUITE ( test_multi_index_utility ) BOOST_AUTO_TEST_CASE ( test_multi_index_has_index ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = boost::numeric::ublas::index; { constexpr auto tuple = std::tuple<>{}; - constexpr auto has_a = has_index::value; - constexpr auto has_b = has_index::value; + constexpr auto has_a = ublas::has_index::value; + constexpr auto has_b = ublas::has_index::value; BOOST_CHECK( !has_a ); BOOST_CHECK( !has_b ); } { - constexpr auto tuple = std::make_tuple(_a); - 
constexpr auto has_a = has_index::value; - constexpr auto has_b = has_index::value; + constexpr auto tuple = std::make_tuple(i::_a); + constexpr auto has_a = ublas::has_index::value; + constexpr auto has_b = ublas::has_index::value; BOOST_CHECK( has_a ); BOOST_CHECK( !has_b ); } { - constexpr auto tuple = std::make_tuple(_a,_b,_,_c,_d); - constexpr auto has_a = has_index::value; - constexpr auto has_b = has_index::value; - constexpr auto has_c = has_index::value; - constexpr auto has_d = has_index::value; - constexpr auto has_e = has_index::value; - constexpr auto has__ = has_index::value; + constexpr auto tuple = std::make_tuple(i::_a,i::_b,i::_,i::_c,i::_d); + constexpr auto has_a = ublas::has_index::value; + constexpr auto has_b = ublas::has_index::value; + constexpr auto has_c = ublas::has_index::value; + constexpr auto has_d = ublas::has_index::value; + constexpr auto has_e = ublas::has_index::value; + constexpr auto has = ublas::has_index::value; BOOST_CHECK( has_a ); BOOST_CHECK( has_b ); BOOST_CHECK( has_c ); BOOST_CHECK( has_d ); BOOST_CHECK( !has_e ); - BOOST_CHECK( has__ ); + BOOST_CHECK( has ); } } @@ -60,55 +60,55 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_has_index ) BOOST_AUTO_TEST_CASE ( test_multi_index_valid ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto tuple = std::tuple<>{}; - constexpr auto valid = valid_multi_index::value; + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_a); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_a); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_a,_,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_a,i::_,i::_b); + constexpr auto 
valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_a,_,_b,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_a,i::_,i::_b,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( !valid ); } { - constexpr auto tuple = std::make_tuple(_c,_a,_,_b,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_a,i::_,i::_b,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( !valid ); } { - constexpr auto tuple = std::make_tuple(_c,_a,_,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_a,i::_,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_,_c,_a,_,_b); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_,i::_c,i::_a,i::_,i::_b); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } { - constexpr auto tuple = std::make_tuple(_,_c,_a,_,_b,_); - constexpr auto valid = valid_multi_index::value; + constexpr auto tuple = std::make_tuple(i::_,i::_c,i::_a,i::_,i::_b,i::_); + constexpr auto valid = ublas::valid_multi_index::value; BOOST_CHECK( valid ); } } @@ -119,136 +119,136 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_valid ) BOOST_AUTO_TEST_CASE ( test_multi_index_number_equal_indices ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto lhs = std::tuple<>{}; constexpr auto rhs = std::tuple<>{}; - constexpr auto num = number_equal_indexes::value; + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { - constexpr auto lhs = std::make_tuple(_a); + constexpr auto lhs = std::make_tuple(i::_a); constexpr auto 
rhs = std::tuple<>{}; - constexpr auto num = number_equal_indexes::value; + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { constexpr auto lhs = std::tuple<>{}; - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 0 ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + 
constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 3 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - constexpr auto num = 
number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 3 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 3 ); } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple( _,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 2 ); } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_,_); - constexpr auto num = number_equal_indexes::value; + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_,i::_); + constexpr auto num = ublas::number_equal_indexes::value; BOOST_CHECK_EQUAL( num, 1 ); } } @@ -261,42 +261,42 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_number_equal_indices ) BOOST_AUTO_TEST_CASE ( test_multi_index_index_position ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = 
boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto tuple = std::tuple<>{}; - constexpr auto ind = index_position::value; + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,1); } { - constexpr auto tuple = std::make_tuple(_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,1); } { - constexpr auto tuple = std::make_tuple(_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,2); } @@ -304,26 +304,26 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position ) { - constexpr auto tuple = std::make_tuple(_c,_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,0); } { - constexpr auto tuple = std::make_tuple(_c,_,_a,_); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a,i::_); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,1); } { - constexpr auto tuple = std::make_tuple(_c,_,_a); - constexpr auto ind = 
index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,2); } { - constexpr auto tuple = std::make_tuple(_c,_,_a); - constexpr auto ind = index_position::value; + constexpr auto tuple = std::make_tuple(i::_c,i::_,i::_a); + constexpr auto ind = ublas::index_position::value; BOOST_CHECK_EQUAL(ind,3); } @@ -338,71 +338,71 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position ) BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) { - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; + namespace ublas = boost::numeric::ublas; + namespace i = ublas::index; { constexpr auto lhs = std::tuple<>{}; constexpr auto rhs = std::tuple<>{}; - auto array = index_position_pairs(lhs, rhs); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { - constexpr auto lhs = std::make_tuple(_a); + constexpr auto lhs = std::make_tuple(i::_a); constexpr auto rhs = std::tuple<>{}; - auto array = index_position_pairs(lhs, rhs); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { constexpr auto lhs = std::tuple<>{}; - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); BOOST_CHECK_EQUAL(array.size(), 0ul ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = 
std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); } { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); } { - constexpr auto lhs = std::make_tuple(_b); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); } { - constexpr auto lhs = std::make_tuple(_a); - constexpr auto rhs = std::make_tuple(_a); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_a); + constexpr auto rhs = std::make_tuple(i::_a); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); } @@ -410,10 +410,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) { - constexpr auto lhs = std::make_tuple(_a,_b); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_a,i::_b); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto 
array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 0 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -421,10 +421,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -432,10 +432,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -443,10 +443,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_c); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_c); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); 
BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -454,10 +454,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 3ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 3ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -467,10 +467,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 3ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 3ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -480,10 +480,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple(_a,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 3ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_a,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 3ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 1 ); @@ -493,10 +493,10 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto 
lhs = std::make_tuple(_b,_a,_d,_); - constexpr auto rhs = std::make_tuple( _,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 2ul ); + constexpr auto lhs = std::make_tuple(i::_b,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 2ul ); BOOST_CHECK_EQUAL(array[0].first , 0 ); BOOST_CHECK_EQUAL(array[0].second, 1 ); BOOST_CHECK_EQUAL(array[1].first , 2 ); @@ -504,19 +504,19 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 2 ); BOOST_CHECK_EQUAL(array[0].second, 2 ); } { - constexpr auto lhs = std::make_tuple(_,_a,_d,_); - constexpr auto rhs = std::make_tuple(_,_b,_d,_,_); - auto array = index_position_pairs(lhs, rhs); - BOOST_ASSERT(array.size() == 1ul ); + constexpr auto lhs = std::make_tuple(i::_,i::_a,i::_d,i::_); + constexpr auto rhs = std::make_tuple(i::_,i::_b,i::_d,i::_,i::_); + auto array = ublas::index_position_pairs(lhs, rhs); + BOOST_STATIC_ASSERT(array.size() == 1ul ); BOOST_CHECK_EQUAL(array[0].first , 2 ); BOOST_CHECK_EQUAL(array[0].second, 2 ); } @@ -524,39 +524,39 @@ BOOST_AUTO_TEST_CASE ( test_multi_index_index_position_pairs ) -BOOST_AUTO_TEST_CASE ( test_multi_index_array_to_vector ) -{ - using namespace boost::numeric::ublas; - using namespace boost::numeric::ublas::index; +//BOOST_AUTO_TEST_CASE ( test_multi_index_array_to_vector ) +//{ +// namespace ublas = boost::numeric::ublas; +// namespace i = ublas::index; - auto check = 
[](auto const& lhs, auto const& rhs) - { - auto array = index_position_pairs(lhs, rhs); +// auto check = [](auto const& lhs, auto const& rhs) +// { +// auto array = ublas::index_position_pairs(lhs, rhs); - auto vector_pair = array_to_vector( array ); +// auto vector_pair = ublas::array_to_vector( array ); - BOOST_CHECK_EQUAL(vector_pair.first .size(), array.size() ); - BOOST_CHECK_EQUAL(vector_pair.second.size(), array.size() ); +// BOOST_CHECK_EQUAL(vector_pair.first .size(), array.size() ); +// BOOST_CHECK_EQUAL(vector_pair.second.size(), array.size() ); - for(auto i = 0ul; i < array.size(); ++i) - { - BOOST_CHECK_EQUAL(vector_pair.first [i], array[i].first +1 ); - BOOST_CHECK_EQUAL(vector_pair.second[i], array[i].second+1 ); - } +// for(auto i = 0ul; i < array.size(); ++i) +// { +// BOOST_CHECK_EQUAL(vector_pair.first [i], array[i].first +1 ); +// BOOST_CHECK_EQUAL(vector_pair.second[i], array[i].second+1 ); +// } - }; +// }; - check(std::tuple<>{} , std::tuple<>{}); - check(std::make_tuple(_a) , std::tuple<>{}); - check(std::tuple<>{} , std::make_tuple(_a)); - check(std::make_tuple(_a) , std::make_tuple(_b)); - check(std::make_tuple(_a) , std::make_tuple(_a)); - check(std::make_tuple(_a,_b), std::make_tuple(_a)); - check(std::make_tuple(_a) , std::make_tuple(_a,_b)); - check(std::make_tuple(_a,_b), std::make_tuple(_a,_b)); - check(std::make_tuple(_b,_a), std::make_tuple(_a,_b)); - check(std::make_tuple(_b,_a,_c), std::make_tuple(_a,_b,_d)); -} +// check(std::tuple<>{} , std::tuple<>{}); +// check(std::make_tuple(i::_a) , std::tuple<>{}); +// check(std::tuple<>{} , std::make_tuple(i::_a)); +// check(std::make_tuple(i::_a) , std::make_tuple(i::_b)); +// check(std::make_tuple(i::_a) , std::make_tuple(i::_a)); +// check(std::make_tuple(i::_a,i::_b), std::make_tuple(i::_a)); +// check(std::make_tuple(i::_a) , std::make_tuple(i::_a,i::_b)); +// check(std::make_tuple(i::_a,i::_b), std::make_tuple(i::_a,i::_b)); +// check(std::make_tuple(i::_b,i::_a), 
std::make_tuple(i::_a,i::_b)); +// check(std::make_tuple(i::_b,i::_a,i::_c), std::make_tuple(i::_a,i::_b,i::_d)); +//} diff --git a/test/tensor/test_multiplication.cpp b/test/tensor/test_multiplication.cpp index 19affdeb8..c5ed51e5f 100644 --- a/test/tensor/test_multiplication.cpp +++ b/test/tensor/test_multiplication.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -16,10 +16,7 @@ #include #include -#include -#include -#include -#include +#include #include "utility.hpp" #include @@ -35,386 +32,384 @@ using test_types = zip>::with_t; - fixture() - : extents { - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{5,4}, // 5 - extents_type{2,3,1}, // 6 - extents_type{4,1,3}, // 7 - extents_type{1,2,3}, // 8 - extents_type{4,2,3}, // 9 - extents_type{4,2,3,5}} // 10 - { - } - std::vector extents; + using extents_t = boost::numeric::ublas::extents<>; + const std::vector extents = + { + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + extents_t{2,3}, // 4 + extents_t{5,4}, // 5 + extents_t{2,3,1}, // 6 + extents_t{4,1,3}, // 7 + extents_t{1,2,3}, // 8 + extents_t{4,2,3}, // 9 + extents_t{4,2,3,5} // 10 + }; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_tensor_mtv, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using extents_type_base = typename extents_type::base_type; - using size_type = typename 
extents_type_base::value_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - for(auto const& na : extents) { + for(auto const& na : extents) { - if(na.size() > 2) - continue; + if(ublas::size(na) > 2) + continue; - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); - for(auto m = 0u; m < na.size(); ++m){ - auto nb = extents_type {na[m],1}; - auto wb = strides_type (nb); - auto b = vector_type (product(nb), value_type{1} ); + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); + for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + auto nb = extents_t {na[m],std::size_t{1}}; + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t (ublas::product(nb), value_t{1} ); - auto nc_base = extents_type_base(std::max(na.size()-1, size_type{2}), 1); + auto nc_base = extents_base_t(std::max(std::size_t{ublas::size(na)-1u}, std::size_t{2}), 1); - for(auto i = 0u, j = 0u; i < na.size(); ++i) - if(i != m) - nc_base[j++] = na[i]; + for(auto i = 0ul, j = 0ul; i < ublas::size(na); ++i) + if(i != m) + nc_base[j++] = na[i]; - auto nc = extents_type (nc_base); - auto wc = strides_type (nc); - auto c = vector_type (product(nc), value_type{0}); + auto nc = extents_t (nc_base); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc), value_t{0}); - ublas::detail::recursive::mtv( - size_type(m), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data()); + ublas::detail::recursive::mtv( + m, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data()); + auto v = value_t(na[m]); + BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); - for(auto i = 0u; i < c.size(); ++i) - 
BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(na[m]) ) * a[i] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[m]) ) * a[i] ); - } } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_mtm, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - // using extents_type_base = typename extents_type::base_type; - + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; - for(auto const& na : extents) { + for(auto const& na : extents) { - if(na.size() != 2) - continue; + if(ublas::size(na) != 2) + continue; - auto a = vector_type (product(na), value_type{2}); - auto wa = strides_type (na); + auto a = vector_t (ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); - auto nb = extents_type {na[1],na[0]}; - auto wb = strides_type (nb); - auto b = vector_type (product(nb), value_type{1} ); + auto nb = extents_t {na[1],na[0]}; + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t (ublas::product(nb), value_t{1} ); - auto nc = extents_type {na[0],nb[1]}; -auto wc = strides_type (nc); -auto c = vector_type (product(nc)); + auto nc = extents_t {na[0],nb[1]}; + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc)); -ublas::detail::recursive::mtm( - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + ublas::detail::recursive::mtm( + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); + auto v = value_t(na[1])*a[0]; + BOOST_CHECK(std::all_of(c.begin(),c.end(), 
[v](auto cc){return cc == v;})); -for(auto i = 0u; i < c.size(); ++i) -BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(na[1]) ) * a[0] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[1]) ) * a[0] ); -} + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttv, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using extents_type_base = typename extents_type::base_type; - using size_type = typename extents_type_base::value_type; - + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - for(auto const& na : extents) { + for(auto const& na : extents) { - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); - for(auto m = 0u; m < na.size(); ++m){ - auto b = vector_type (na[m], value_type{1} ); - auto nb = extents_type {na[m],1}; - auto wb = strides_type (nb); + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); + for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + auto b = vector_t (na[m], value_t{1} ); + auto nb = extents_t {na[m],1}; + auto wb = ublas::to_strides(nb,layout_t{}); - auto nc_base = extents_type_base(std::max(na.size()-1, size_type(2)),1); + auto nc_base = extents_base_t(std::max(std::size_t{ublas::size(na)-1u}, std::size_t{2}),1); - for(auto i = 0ul, j = 0ul; i < na.size(); ++i) - if(i != m) - nc_base[j++] = na[i]; + for(auto i = 0ul, j = 0ul; i < ublas::size(na); ++i) + if(i != m) + nc_base[j++] = na[i]; - auto nc = extents_type (nc_base); - auto wc = 
strides_type (nc); - auto c = vector_type (product(nc), value_type{0}); + auto nc = extents_t (nc_base); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc), value_t{0}); - ublas::ttv(size_type(m+1), na.size(), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + ublas::ttv(m+1, ublas::size(na), + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); + auto v = value_t(na[m]); + BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type(na[m]) * a[i] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t(na[m]) * a[i] ); - } } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttm, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename extents_type::value_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + + + for(auto const& na : extents) { + + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); + for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + const auto nb = extents_t {na[m], na[m] }; + const auto b = vector_t (ublas::product(nb), value_t{1} ); + const auto wb = ublas::to_strides(nb,layout_t{}); - for(auto const& na : extents) { + const auto& nc = na; + const auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t (ublas::product(nc), value_t{0}); - auto a = vector_type(product(na), value_type{2}); - auto wa = 
strides_type(na); - for(auto m = 0u; m < na.size(); ++m){ - auto nb = extents_type {na[m], na[m] }; - auto b = vector_type (product(nb), value_type{1} ); - auto wb = strides_type (nb); + ublas::ttm(m+1, ublas::size(na), + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); - auto nc = na; - auto wc = strides_type (nc); - auto c = vector_type (product(nc), value_type{0}); + auto v = value_t(na[m]); + BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); - ublas::ttm(size_type(m+1), na.size(), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(na[m]) ) * a[i] ); +// for(auto i = 0u; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[m]) ) * a[i] ); - } } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttt_permutation, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename strides_type::value_type; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - auto compute_factorial = [](auto const& p){ - auto f = 1ul; - for(auto i = 1u; i <= p; ++i) - f *= i; - return f; - }; + auto compute_factorial = [](auto const& p){ + auto f = 1ul; + for(auto i = 1u; i <= p; ++i) + f *= i; + return f; + }; - auto compute_inverse_permutation = [](auto const& pi){ - auto pi_inv = pi; - for(auto j = 0u; j < pi.size(); ++j) - pi_inv[pi[j]-1] = 
j+1; - return pi_inv; - }; + auto compute_inverse_permutation = [](auto const& pi){ + auto pi_inv = pi; + for(auto j = 0u; j < pi.size(); ++j) + pi_inv[pi[j]-1] = j+1; + return pi_inv; + }; - auto permute_extents = [](auto const& pi, auto const& na){ - auto nb = na; - assert(pi.size() == na.size()); - for(auto j = 0u; j < pi.size(); ++j) - nb[j] = na[pi[j]-1]; - return nb; - }; + auto permute_extents = [](auto const& pi, auto const& na){ + auto nb_base = na.base(); + assert(pi.size() == ublas::size(na)); + for(auto j = 0u; j < pi.size(); ++j) + nb_base[j] = na[pi[j]-1]; + return extents_t(nb_base); + }; - // left-hand and right-hand side have the - // the same number of elements + // left-hand and right-hand side have the + // the same number of elements - // computing the inner product with - // different permutation tuples for - // right-hand side + // computing the inner product with + // different permutation tuples for + // right-hand side - for(auto const& na : extents) { + for(auto const& na : extents) { - auto wa = strides_type(na); - auto a = vector_type(product(na), value_type{2}); - auto pa = na.size(); - auto pia = std::vector(pa); - std::iota( pia.begin(), pia.end(), 1 ); + auto wa = ublas::to_strides(na,layout_t{}); + auto a = vector_t(ublas::product(na), value_t{2}); + auto pa = ublas::size(na); + auto pia = std::vector(pa); + std::iota( pia.begin(), pia.end(), std::size_t{1} ); - auto pib = pia; - auto pib_inv = compute_inverse_permutation(pib); + auto pib = pia; + auto pib_inv = compute_inverse_permutation(pib); - auto f = compute_factorial(pa); + auto f = compute_factorial(pa); - // for the number of possible permutations - // only permutation tuple pib is changed. - for(auto i = 0u; i < f; ++i) { + // for the number of possible permutations + // only permutation tuple pib is changed. 
+ for(auto i = 0u; i < f; ++i) { - auto nb = permute_extents( pib, na ); - auto wb = strides_type(nb); - auto b = vector_type(product(nb), value_type{3}); - auto pb = nb.size(); + auto nb = permute_extents( pib, na ); + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t(ublas::product(nb), value_t{3}); + auto pb = ublas::size(nb); - // the number of contractions is changed. - for( auto q = size_type(0); q <= pa; ++q) { + // the number of contractions is changed. + for(auto q = std::size_t{0}; q <= pa; ++q) { - auto r = pa - q; - auto s = pb - q; + auto r = pa - q; + auto s = pb - q; - auto pc = r+s > 0 ? std::max(r+s,size_type(2)) : size_type(2); + auto pc = r+s > 0 ? std::max(std::size_t{r+s},std::size_t{2}) : std::size_t{2}; - auto nc_base = std::vector( pc , 1 ); + auto nc_base = extents_base_t(pc,std::size_t{1}); - for(auto j = 0u; j < r; ++j) - nc_base[ j ] = na[ pia[j]-1 ]; + for(auto j = 0u; j < r; ++j) + nc_base[j] = na[pia[j]-1]; - for(auto j = 0u; j < s; ++j) - nc_base[ r + j ] = nb[ pib_inv[j]-1 ]; + for(auto j = 0u; j < s; ++j) + nc_base[r+j] = nb[ pib_inv[j]-1 ]; - auto nc = extents_type ( nc_base ); - auto wc = strides_type ( nc ); - auto c = vector_type ( product(nc), value_type(0) ); + auto nc = extents_t ( nc_base ); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t ( ublas::product(nc), value_t(0) ); - ublas::ttt(pa,pb,q, - pia.data(), pib_inv.data(), - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + ublas::ttt(pa,pb,q, + pia.data(), pib_inv.data(), + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); - auto acc = value_type(1); - for(auto j = r; j < pa; ++j) - acc *= value_type( static_cast< inner_type_t >(na[pia[j]-1]) ); + auto acc = std::size_t{1}; + for(auto j = r; j < pa; ++j) + acc *= na[pia[j]-1]; - for(auto j = 0ul; j < c.size(); ++j) - BOOST_CHECK_EQUAL( c[j] , acc * a[0] * b[0] ); + auto v = 
value_t(acc)*a[0]*b[0]; - } + BOOST_CHECK( std::all_of(c.begin(),c.end(), [v](auto cc){return cc == v; } ) ); - std::next_permutation(pib.begin(), pib.end()); - pib_inv = compute_inverse_permutation(pib); - } + } + + std::next_permutation(pib.begin(), pib.end()); + pib_inv = compute_inverse_permutation(pib); } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttt, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using vector_type = std::vector; - using extents_type = ublas::extents<>; - using strides_type = ublas::strides_t; - using size_type = typename strides_type::value_type; - - // left-hand and right-hand side have the - // the same number of elements + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + using extents_t = ublas::extents<>; + using extents_base_t = typename extents_t::base_type; - // computing the inner product with - // different permutation tuples for - // right-hand side + // left-hand and right-hand side have the + // the same number of elements - for(auto const& na : extents) { + // computing the inner product with + // different permutation tuples for + // right-hand side - auto wa = strides_type(na); - auto a = vector_type(product(na), value_type{2}); - auto pa = na.size(); + for(auto const& na : extents) { - auto nb = na; - auto wb = strides_type(nb); - auto b = vector_type(product(nb), value_type{3}); - auto pb = nb.size(); + auto wa = ublas::to_strides(na,layout_t{}); + auto a = vector_t(ublas::product(na), value_t{2}); + auto pa = ublas::size(na); - // std::cout << "na = "; - // std::copy(na.begin(), na.end(), std::ostream_iterator(std::cout, " ")); - // std::cout << std::endl; + auto const& nb = na; + auto wb = ublas::to_strides(nb,layout_t{}); + auto b = vector_t(ublas::product(nb), value_t{3}); + 
auto pb = ublas::size(nb); - // std::cout << "nb = "; - // std::copy(nb.begin(), nb.end(), std::ostream_iterator(std::cout, " ")); - // std::cout << std::endl; + // std::cout << "na = "; + // std::copy(na.begin(), na.end(), std::ostream_iterator(std::cout, " ")); + // std::cout << std::endl; + // std::cout << "nb = "; + // std::copy(nb.begin(), nb.end(), std::ostream_iterator(std::cout, " ")); + // std::cout << std::endl; - // the number of contractions is changed. - for( auto q = size_type(0); q <= pa; ++q) { // pa - auto r = pa - q; - auto s = pb - q; + // the number of contractions is changed. + for( auto q = std::size_t{0}; q <= pa; ++q) { // pa - auto pc = r+s > 0 ? std::max(r+s, size_type(2)) : size_type(2); + auto r = pa - q; + auto s = pb - q; - auto nc_base = std::vector( pc , 1 ); + auto pc = r+s > 0 ? std::max(std::size_t{r+s},std::size_t{2}) : std::size_t{2}; - for(auto i = 0u; i < r; ++i) - nc_base[ i ] = na[ i ]; + auto nc_base = extents_base_t(pc,std::size_t{1}); - for(auto i = 0u; i < s; ++i) - nc_base[ r + i ] = nb[ i ]; + for(auto i = 0u; i < r; ++i) + nc_base[i] = na[i]; - auto nc = extents_type ( nc_base ); - auto wc = strides_type ( nc ); - auto c = vector_type ( product(nc), value_type{0} ); + for(auto i = 0u; i < s; ++i) + nc_base[r+i] = nb[i]; - // std::cout << "nc = "; - // std::copy(nc.begin(), nc.end(), std::ostream_iterator(std::cout, " ")); - // std::cout << std::endl; + auto nc = extents_t ( nc_base ); + auto wc = ublas::to_strides(nc,layout_t{}); + auto c = vector_t ( ublas::product(nc), value_t{0} ); - ublas::ttt(pa,pb,q, - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data(), nb.data(), wb.data()); + // std::cout << "nc = "; + // std::copy(nc.begin(), nc.end(), std::ostream_iterator(std::cout, " ")); + // std::cout << std::endl; + ublas::ttt(pa,pb,q, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + b.data(), nb.data(), wb.data()); - auto acc = value_type(1); - for(auto i = r; i < pa; ++i) 
- acc *= value_type( static_cast< inner_type_t >(na[i]) ); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , acc * a[0] * b[0] ); + auto acc = std::size_t{1}; + for(auto i = r; i < pa; ++i) + acc *= na[i]; - } + auto v = value_t(acc)*a[0]*b[0]; + BOOST_CHECK( std::all_of(c.begin(),c.end(), [v](auto cc){return cc == v; } ) ); } + + } } @@ -423,70 +418,68 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttt, value, test_types, fixture ) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_inner, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using strides_type = ublas::strides_t,layout_type>; - using vector_type = std::vector; + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using vector_t = std::vector; + for(auto const& n : extents) { - for(auto const& n : extents) { + auto a = vector_t(ublas::product(n), value_t{2}); + auto b = vector_t(ublas::product(n), value_t{3}); + auto w = ublas::to_strides(n,layout_t{}); - auto a = vector_type(product(n), value_type{2}); - auto b = vector_type(product(n), value_type{3}); - auto w = strides_type(n); + auto c = ublas::inner(ublas::size(n), n.data(), a.data(), w.data(), b.data(), w.data(), value_t(0)); + auto cref = std::inner_product(a.begin(), a.end(), b.begin(), value_t(0)); - auto c = ublas::inner(n.size(), n.data(), a.data(), w.data(), b.data(), w.data(), value_type(0)); - auto cref = std::inner_product(a.begin(), a.end(), b.begin(), value_type(0)); + BOOST_CHECK_EQUAL( c , cref ); - BOOST_CHECK_EQUAL( c , cref ); - - } + } } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_outer, value, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using extents_type = ublas::extents<>; - using strides_type = 
ublas::strides_t; - using vector_type = std::vector; - + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + using extents_t = ublas::extents<>; + using vector_t = std::vector; - for(auto const& na : extents) { - auto a = vector_type(product(na), value_type{2}); - auto wa = strides_type(na); + for(auto const& na : extents) { - for(auto const& nb : extents) { + auto a = vector_t(ublas::product(na), value_t{2}); + auto wa = ublas::to_strides(na,layout_t{}); - auto b = vector_type(product(nb), value_type{3}); - auto wb = strides_type(nb); + for(auto const& nb : extents) { - auto c = vector_type(product(nb)*product(na)); - auto nc = typename extents_type::base_type(na.size()+nb.size()); + auto b = vector_t(ublas::product(nb), value_t{3}); + auto wb = ublas::to_strides(nb,layout_t{}); - for(auto i = 0u; i < na.size(); ++i) - nc[i] = na[i]; - for(auto i = 0u; i < nb.size(); ++i) - nc[i+na.size()] = nb[i]; + auto c = vector_t(ublas::product(nb)*ublas::product(na)); + auto nc_base = typename extents_t::base_type(ublas::size(na)+ublas::size(nb)); - auto wc = strides_type(extents_type(nc)); + for(auto i = 0u; i < ublas::size(na); ++i) + nc_base[i] = na[i]; + for(auto i = 0u; i < ublas::size(nb); ++i) + nc_base[i+ublas::size(na)] = nb[i]; - ublas::outer(c.data(), nc.size(), nc.data(), wc.data(), - a.data(), na.size(), na.data(), wa.data(), - b.data(), nb.size(), nb.data(), wb.data()); + auto nc = extents_t(nc_base); + auto wc = ublas::to_strides(nc,layout_t{}); - for(auto const& cc : c) - BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); - } + ublas::outer(c.data(), ublas::size(nc), nc.data(), wc.data(), + a.data(), ublas::size(na), na.data(), wa.data(), + b.data(), ublas::size(nb), nb.data(), wb.data()); + for(auto const& cc : c) + BOOST_CHECK_EQUAL( cc , a[0]*b[0] ); } + } + } diff --git a/test/tensor/test_operators_arithmetic.cpp b/test/tensor/test_operators_arithmetic.cpp index 723fffe11..98484ccf7 
100644 --- a/test/tensor/test_operators_arithmetic.cpp +++ b/test/tensor/test_operators_arithmetic.cpp @@ -17,7 +17,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_tensor_arithmetic_operations, * boost::unit_test::depends_on("test_tensor")) +BOOST_AUTO_TEST_SUITE(test_tensor_arithmetic_operations/*, * boost::unit_test::depends_on("test_tensor")*/) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -25,22 +25,21 @@ using test_types = zip::with_t; - fixture() - : extents{ - extents_type{}, // 0 - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{4,1,3}, // 6 - extents_type{1,2,3}, // 7 - extents_type{4,2,3}, // 8 - extents_type{4,2,3,5}} // 9 + using extents_type = boost::numeric::ublas::extents<>; + + std::vector extents = { - } - std::vector extents; +// extents_type{}, // 0 + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5} // 9 + }; }; @@ -48,10 +47,10 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) @@ -86,11 +85,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, valu BOOST_CHECK_EQUAL ( r(i), 4 ); - r = tensor_type (e,1) + tensor_type (e,1); - - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2 ); - r = t * t * t * t2; for(auto i = 0ul; i < t.size(); ++i) @@ -117,10 +111,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_tensor_binary_arithmetic_operations, valu BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) @@ -180,10 +174,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) diff --git a/test/tensor/test_operators_comparison.cpp b/test/tensor/test_operators_comparison.cpp index 6e4932f41..b6aeb191a 100644 --- a/test/tensor/test_operators_comparison.cpp +++ b/test/tensor/test_operators_comparison.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 Cem Bassoy +// Copyright (c) 2018 Cem Bassoy // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -19,155 +19,155 @@ #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_tensor_comparison, * boost::unit_test::depends_on("test_tensor")) +BOOST_AUTO_TEST_SUITE(test_tensor_comparison/*, * boost::unit_test::depends_on("test_tensor")*/) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents{ - extents_type{}, // 0 - extents_type{1,1}, // 1 - extents_type{1,2}, // 2 - extents_type{2,1}, // 3 - extents_type{2,3}, // 4 - extents_type{2,3,1}, // 5 - extents_type{4,1,3}, // 6 - extents_type{1,2,3}, // 7 - extents_type{4,2,3}, // 8 - extents_type{4,2,3,5}} // 9 - { - } - std::vector extents; + using extents_type = boost::numeric::ublas::extents<>; + fixture() + : extents{ + // extents_type{}, // 0 + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5}} // 9 + { + } + std::vector extents; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - - auto check = [](auto const& e) - { - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; - - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); - - if(t.empty()) - return; - - BOOST_CHECK(!(t < t)); - BOOST_CHECK(!(t > t)); - BOOST_CHECK( t < t2 ); - BOOST_CHECK( t2 > t ); - BOOST_CHECK( t <= t ); - BOOST_CHECK( t >= t ); - BOOST_CHECK( t <= t2 ); - BOOST_CHECK( t2 >= t ); - BOOST_CHECK( t2 >= t2 ); - BOOST_CHECK( t2 >= t ); - }; 
- - for(auto const& e : extents) - check(e); - - auto e0 = extents.at(0); - auto e1 = extents.at(1); - auto e2 = extents.at(2); - - - auto b = false; - BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e0))); - BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) == tensor_type(e2))); - BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e2))); - BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) != tensor_type(e2))); - - BOOST_CHECK_THROW ( b = (tensor_type(e1) >= tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW ( b = (tensor_type(e1) <= tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW ( b = (tensor_type(e1) < tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW ( b = (tensor_type(e1) > tensor_type(e2)), std::runtime_error ); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + + + auto check = [](auto const& e) + { + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK(!(t < t)); + BOOST_CHECK(!(t > t)); + BOOST_CHECK( t < t2 ); + BOOST_CHECK( t2 > t ); + BOOST_CHECK( t <= t ); + BOOST_CHECK( t >= t ); + BOOST_CHECK( t <= t2 ); + BOOST_CHECK( t2 >= t ); + BOOST_CHECK( t2 >= t2 ); + BOOST_CHECK( t2 >= t ); + }; + + for(auto const& e : extents) + check(e); + + auto e0 = extents.at(0); + auto e1 = extents.at(1); + auto e2 = extents.at(2); + + + auto b = false; + BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e0))); + BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) == tensor_type(e2))); + BOOST_CHECK_NO_THROW ( b = (tensor_type(e0) == tensor_type(e2))); + BOOST_CHECK_NO_THROW ( b = (tensor_type(e1) != tensor_type(e2))); + + BOOST_CHECK_THROW ( b = (tensor_type(e1) >= tensor_type(e2)), 
std::runtime_error ); + BOOST_CHECK_THROW ( b = (tensor_type(e1) <= tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (tensor_type(e1) < tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (tensor_type(e1) > tensor_type(e2)), std::runtime_error ); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - - auto check = [](auto const& e) - { - auto t = tensor_type (e); - auto t2 = tensor_type (e); - auto v = value_type {}; - - std::iota(t.begin(), t.end(), v); - std::iota(t2.begin(), t2.end(), v+2); - - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); - - if(t.empty()) - return; - - BOOST_CHECK( !(t < t) ); - BOOST_CHECK( !(t > t) ); - BOOST_CHECK( t < (t2+t) ); - BOOST_CHECK( (t2+t) > t ); - BOOST_CHECK( t <= (t+t) ); - BOOST_CHECK( (t+t2) >= t ); - BOOST_CHECK( (t2+t2+2) >= t); - BOOST_CHECK( 2*t2 > t ); - BOOST_CHECK( t < 2*t2 ); - BOOST_CHECK( 2*t2 > t); - BOOST_CHECK( 2*t2 >= t2 ); - BOOST_CHECK( t2 <= 2*t2); - BOOST_CHECK( 3*t2 >= t ); - - }; - - for(auto const& e : extents) - check(e); - - auto e0 = extents.at(0); - auto e1 = extents.at(1); - auto e2 = extents.at(2); - - auto b = false; - BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e0) + tensor_type(e0)) ); - BOOST_CHECK_NO_THROW (b = tensor_type(e1) == (tensor_type(e2) + tensor_type(e2)) ); - BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e2) + 2) ); - BOOST_CHECK_NO_THROW (b = tensor_type(e1) != (2 + tensor_type(e2)) ); - - BOOST_CHECK_NO_THROW (b = (tensor_type(e0) + tensor_type(e0)) == tensor_type(e0) ); - BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + tensor_type(e2)) == tensor_type(e1) ); - BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + 2) == tensor_type(e0) ); - BOOST_CHECK_NO_THROW (b = (2 + tensor_type(e2)) != 
tensor_type(e1) ); - - BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) <= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) > (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); - - BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + 2), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) <= (2 + tensor_type(e2)), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + 3), std::runtime_error ); - BOOST_CHECK_THROW (b = tensor_type(e1) > (4 + tensor_type(e2)), std::runtime_error ); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + + + auto check = [](auto const& e) + { + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK( !(t < t) ); + BOOST_CHECK( !(t > t) ); + BOOST_CHECK( t < (t2+t) ); + BOOST_CHECK( (t2+t) > t ); + BOOST_CHECK( t <= (t+t) ); + BOOST_CHECK( (t+t2) >= t ); + BOOST_CHECK( (t2+t2+2) >= t); + BOOST_CHECK( 2*t2 > t ); + BOOST_CHECK( t < 2*t2 ); + BOOST_CHECK( 2*t2 > t); + BOOST_CHECK( 2*t2 >= t2 ); + BOOST_CHECK( t2 <= 2*t2); + BOOST_CHECK( 3*t2 >= t ); + + }; + + for(auto const& e : extents) + check(e); + + auto e0 = extents.at(0); + auto e1 = extents.at(1); + auto e2 = extents.at(2); + + auto b = false; + BOOST_CHECK_NO_THROW (b = tensor_type(e0) == (tensor_type(e0) + tensor_type(e0)) ); + BOOST_CHECK_NO_THROW (b = tensor_type(e1) == (tensor_type(e2) + tensor_type(e2)) ); + BOOST_CHECK_NO_THROW (b = tensor_type(e0) 
== (tensor_type(e2) + 2) ); + BOOST_CHECK_NO_THROW (b = tensor_type(e1) != (2 + tensor_type(e2)) ); + + BOOST_CHECK_NO_THROW (b = (tensor_type(e0) + tensor_type(e0)) == tensor_type(e0) ); + BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + tensor_type(e2)) == tensor_type(e1) ); + BOOST_CHECK_NO_THROW (b = (tensor_type(e2) + 2) == tensor_type(e0) ); + BOOST_CHECK_NO_THROW (b = (2 + tensor_type(e2)) != tensor_type(e1) ); + + BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) <= (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) > (tensor_type(e2) + tensor_type(e2)), std::runtime_error ); + + BOOST_CHECK_THROW (b = tensor_type(e1) >= (tensor_type(e2) + 2), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) <= (2 + tensor_type(e2)), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) < (tensor_type(e2) + 3), std::runtime_error ); + BOOST_CHECK_THROW (b = tensor_type(e1) > (4 + tensor_type(e2)), std::runtime_error ); } @@ -175,70 +175,70 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - - - auto check = [](auto const& e) - { - - BOOST_CHECK( tensor_type(e,value_type{2}) == tensor_type(e,value_type{2}) ); - BOOST_CHECK( tensor_type(e,value_type{2}) != tensor_type(e,value_type{1}) ); - - if(e.empty()) - return; - - BOOST_CHECK( !(tensor_type(e,2) < 2) ); - BOOST_CHECK( !(tensor_type(e,2) > 2) ); - BOOST_CHECK( (tensor_type(e,2) >= 2) ); - BOOST_CHECK( (tensor_type(e,2) <= 2) ); - BOOST_CHECK( 
(tensor_type(e,2) == 2) ); - BOOST_CHECK( (tensor_type(e,2) != 3) ); - - BOOST_CHECK( !(2 > tensor_type(e,2)) ); - BOOST_CHECK( !(2 < tensor_type(e,2)) ); - BOOST_CHECK( (2 <= tensor_type(e,2)) ); - BOOST_CHECK( (2 >= tensor_type(e,2)) ); - BOOST_CHECK( (2 == tensor_type(e,2)) ); - BOOST_CHECK( (3 != tensor_type(e,2)) ); - - BOOST_CHECK( !( tensor_type(e,2)+3 < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+3 > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+3 != 6) ); - - - BOOST_CHECK( !( 5 > tensor_type(e,2)+3) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+3) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+3) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+3) ); - - - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) < 5) ); - BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) > 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) >= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) <= 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) == 5) ); - BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) != 6) ); - - - BOOST_CHECK( !( 5 > tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( !( 5 < tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 >= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 <= tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 5 == tensor_type(e,2)+tensor_type(e,3)) ); - BOOST_CHECK( ( 6 != tensor_type(e,2)+tensor_type(e,3)) ); - - }; - - for(auto const& e : extents) - check(e); + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + + + auto check = [](auto const& e) + { + + BOOST_CHECK( tensor_type(e,value_type{2}) == tensor_type(e,value_type{2}) ); + BOOST_CHECK( 
tensor_type(e,value_type{2}) != tensor_type(e,value_type{1}) ); + + if(ublas::empty(e)) + return; + + BOOST_CHECK( !(tensor_type(e,2) < 2) ); + BOOST_CHECK( !(tensor_type(e,2) > 2) ); + BOOST_CHECK( (tensor_type(e,2) >= 2) ); + BOOST_CHECK( (tensor_type(e,2) <= 2) ); + BOOST_CHECK( (tensor_type(e,2) == 2) ); + BOOST_CHECK( (tensor_type(e,2) != 3) ); + + BOOST_CHECK( !(2 > tensor_type(e,2)) ); + BOOST_CHECK( !(2 < tensor_type(e,2)) ); + BOOST_CHECK( (2 <= tensor_type(e,2)) ); + BOOST_CHECK( (2 >= tensor_type(e,2)) ); + BOOST_CHECK( (2 == tensor_type(e,2)) ); + BOOST_CHECK( (3 != tensor_type(e,2)) ); + + BOOST_CHECK( !( tensor_type(e,2)+3 < 5) ); + BOOST_CHECK( !( tensor_type(e,2)+3 > 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 >= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 <= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 == 5) ); + BOOST_CHECK( ( tensor_type(e,2)+3 != 6) ); + + + BOOST_CHECK( !( 5 > tensor_type(e,2)+3) ); + BOOST_CHECK( !( 5 < tensor_type(e,2)+3) ); + BOOST_CHECK( ( 5 >= tensor_type(e,2)+3) ); + BOOST_CHECK( ( 5 <= tensor_type(e,2)+3) ); + BOOST_CHECK( ( 5 == tensor_type(e,2)+3) ); + BOOST_CHECK( ( 6 != tensor_type(e,2)+3) ); + + + BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) < 5) ); + BOOST_CHECK( !( tensor_type(e,2)+tensor_type(e,3) > 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) >= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) <= 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) == 5) ); + BOOST_CHECK( ( tensor_type(e,2)+tensor_type(e,3) != 6) ); + + + BOOST_CHECK( !( 5 > tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( !( 5 < tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 5 >= tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 5 <= tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 5 == tensor_type(e,2)+tensor_type(e,3)) ); + BOOST_CHECK( ( 6 != tensor_type(e,2)+tensor_type(e,3)) ); + + }; + + for(auto const& e : extents) + check(e); } diff --git a/test/tensor/test_static_expression_evaluation.cpp 
b/test/tensor/test_static_expression_evaluation.cpp index df66b1ee3..7412eef01 100644 --- a/test/tensor/test_static_expression_evaluation.cpp +++ b/test/tensor/test_static_expression_evaluation.cpp @@ -18,42 +18,42 @@ #include -BOOST_AUTO_TEST_SUITE(test_static_tensor_expression) +BOOST_AUTO_TEST_SUITE(test_tensor_static_expression) using test_types = zip>::with_t; struct fixture { - template - using extents_type = boost::numeric::ublas::static_extents; - - std::tuple< - extents_type<1,1>, // 1 - extents_type<2,3>, // 2 - extents_type<4,1,3>, // 3 - extents_type<4,2,3>, // 4 - extents_type<4,2,3,5> // 5 + template + using extents_type = boost::numeric::ublas::extents; + + std::tuple< + extents_type<1,1>, // 1 + extents_type<2,3>, // 2 + extents_type<4,1,3>, // 3 + extents_type<4,2,3>, // 4 + extents_type<4,2,3,5> // 5 > extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_retrieve_extents, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_expression_retrieve_extents, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); + auto uplus1 = [](auto const& a){return a + value_type(1);}; + auto uplus2 = [](auto const& a){return value_type(2) + a;}; auto bplus = std::plus {}; auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto& e){ + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; - auto t = tensor_type{}; + auto t = tensor_type(); auto v = value_type{}; for(auto& tt: t){ tt = v; v+=value_type{1}; } @@ -82,23 +82,23 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_static_tensor_expression_retrieve_extents }); - for_each_tuple(extents, [&](auto I, auto& e1){ + for_each_in_tuple(extents, [&](auto I, auto& e1){ if ( I >= std::tuple_size_v - 1){ return; } using extents_type1 = std::decay_t; - using tensor_type1 = ublas::static_tensor; + using tensor_type1 = ublas::tensor_static; - for_each_tuple(extents, [&](auto J, auto& e2){ + for_each_in_tuple(extents, [&](auto J, auto& e2){ if( J != I + 1 ){ return; } using extents_type2 = std::decay_t; - using tensor_type2 = ublas::static_tensor; + using tensor_type2 = ublas::tensor_static; auto v = value_type{}; @@ -129,20 +129,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_retrieve_extents -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_all_extents_equal, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_expression_all_extents_equal, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto uplus1 = std::bind( std::plus{}, std::placeholders::_1, value_type(1) ); - auto uplus2 = std::bind( std::plus{}, value_type(2), std::placeholders::_2 ); + auto uplus1 = [](auto const& a){return a + value_type(1);}; + auto uplus2 = [](auto const& a){return value_type(2) + a;}; auto bplus = std::plus {}; auto bminus = std::minus{}; - for_each_tuple(extents, [&](auto const&, auto& e){ + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type{}; @@ -175,23 +175,23 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_all_extents_equa }); - for_each_tuple(extents, [&](auto I, auto& e1){ + for_each_in_tuple(extents, [&](auto I, auto& e1){ if ( I >= std::tuple_size_v - 1){ return; } using extents_type1 = std::decay_t; - 
using tensor_type1 = ublas::static_tensor; + using tensor_type1 = ublas::tensor_static; - for_each_tuple(extents, [&](auto J, auto& e2){ + for_each_in_tuple(extents, [&](auto J, auto& e2){ if( J != I + 1 ){ return; } using extents_type2 = std::decay_t; - using tensor_type2 = ublas::static_tensor; + using tensor_type2 = ublas::tensor_static; auto v = value_type{}; @@ -217,4 +217,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_static_tensor_expression_all_extents_equa } -BOOST_AUTO_TEST_SUITE_END(); +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_static_extents.cpp b/test/tensor/test_static_extents.cpp index e44040083..c92794886 100644 --- a/test/tensor/test_static_extents.cpp +++ b/test/tensor/test_static_extents.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,477 +10,356 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#include +#include #include #include #include "utility.hpp" BOOST_AUTO_TEST_SUITE(test_static_extents) -template -using extents = boost::numeric::ublas::basic_static_extents; -BOOST_AUTO_TEST_CASE(test_static_extents_ctor) { - using namespace boost::numeric; +struct fixture +{ + template + using extents = boost::numeric::ublas::extents; + + extents<> e0 {}; + extents<1> e1 {}; + extents<1, 1> e11 {}; + extents<2, 1> e21 {}; + extents<1, 2> e12 {}; + extents<2, 3> e23 {}; + extents<2, 1, 1> e211 {}; + extents<2, 3, 1> e231 {}; + extents<1, 2, 3> e123 {}; + extents<4, 2, 3> e423 {}; + extents<1, 2, 3, 4> e1234 {}; + extents<4, 2, 1, 3> e4213 {}; + extents<1, 2, 3, 4, 1> e12341 {}; + extents<4, 2, 1, 3, 1> e42131 {}; + extents<1, 4, 2, 1, 3, 1> e142131 {}; +}; - auto e0 = extents<>{}; - BOOST_CHECK(e0.empty()); - BOOST_CHECK_EQUAL(e0.size(), 0); +BOOST_FIXTURE_TEST_CASE(test_extents_static_ctor, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("ctor")) +{ - auto e1 = extents<1, 2>{}; - BOOST_CHECK(!e1.empty()); - BOOST_CHECK_EQUAL(e1.size(), 2); + namespace ublas = boost::numeric::ublas; + + BOOST_CHECK( ublas::empty( e0)); + BOOST_CHECK(! ublas::empty( e1)); + BOOST_CHECK(! ublas::empty( e11)); + BOOST_CHECK(! ublas::empty( e12)); + BOOST_CHECK(! ublas::empty( e21)); + BOOST_CHECK(! ublas::empty( e23)); + BOOST_CHECK(! ublas::empty( e211)); + BOOST_CHECK(! ublas::empty( e123)); + BOOST_CHECK(! ublas::empty( e423)); + BOOST_CHECK(! ublas::empty( e1234)); + BOOST_CHECK(! ublas::empty( e4213)); + BOOST_CHECK(! 
ublas::empty(e142131)); + + BOOST_CHECK_EQUAL( ublas::size( e0),0); + BOOST_CHECK_EQUAL( ublas::size( e1),1); + BOOST_CHECK_EQUAL( ublas::size( e11),2); + BOOST_CHECK_EQUAL( ublas::size( e12),2); + BOOST_CHECK_EQUAL( ublas::size( e21),2); + BOOST_CHECK_EQUAL( ublas::size( e23),2); + BOOST_CHECK_EQUAL( ublas::size( e211),3); + BOOST_CHECK_EQUAL( ublas::size( e123),3); + BOOST_CHECK_EQUAL( ublas::size( e423),3); + BOOST_CHECK_EQUAL( ublas::size( e1234),4); + BOOST_CHECK_EQUAL( ublas::size( e4213),4); + BOOST_CHECK_EQUAL( ublas::size(e142131),6); + + + BOOST_CHECK_EQUAL( ublas::size_v,0); + BOOST_CHECK_EQUAL( ublas::size_v,1); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,2); + BOOST_CHECK_EQUAL( ublas::size_v,3); + BOOST_CHECK_EQUAL( ublas::size_v,3); + BOOST_CHECK_EQUAL( ublas::size_v,3); + BOOST_CHECK_EQUAL( ublas::size_v,4); + BOOST_CHECK_EQUAL( ublas::size_v,4); + BOOST_CHECK_EQUAL( ublas::size_v,6); - auto e2 = extents<2, 3>{}; - BOOST_CHECK(!e2.empty()); - BOOST_CHECK_EQUAL(e2.size(), 2); +} - auto e3 = extents<4, 2, 3>{}; // 7 - BOOST_CHECK(!e3.empty()); - BOOST_CHECK_EQUAL(e3.size(), 3); +BOOST_FIXTURE_TEST_CASE(test_extents_static_product, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("product")) +{ + + namespace ublas = boost::numeric::ublas; + + BOOST_CHECK_EQUAL(ublas::product( e0), 0); + //FIXME: BOOST_CHECK_EQUAL(ublas::product( e1), 1); + BOOST_CHECK_EQUAL(ublas::product( e11), 1); + BOOST_CHECK_EQUAL(ublas::product( e12), 2); + BOOST_CHECK_EQUAL(ublas::product( e21), 2); + BOOST_CHECK_EQUAL(ublas::product( e23), 6); + BOOST_CHECK_EQUAL(ublas::product( e211), 2); + BOOST_CHECK_EQUAL(ublas::product( e123), 6); + BOOST_CHECK_EQUAL(ublas::product( e423), 24); + BOOST_CHECK_EQUAL(ublas::product( e1234), 24); + BOOST_CHECK_EQUAL(ublas::product( e4213), 24); + BOOST_CHECK_EQUAL(ublas::product(e142131), 24); + + + 
BOOST_CHECK_EQUAL(ublas::product_v, 0); + BOOST_CHECK_EQUAL(ublas::product_v, 1); + BOOST_CHECK_EQUAL(ublas::product_v, 1); + BOOST_CHECK_EQUAL(ublas::product_v, 2); + BOOST_CHECK_EQUAL(ublas::product_v, 2); + BOOST_CHECK_EQUAL(ublas::product_v, 6); + BOOST_CHECK_EQUAL(ublas::product_v, 2); + BOOST_CHECK_EQUAL(ublas::product_v, 6); + BOOST_CHECK_EQUAL(ublas::product_v, 24); + BOOST_CHECK_EQUAL(ublas::product_v, 24); + BOOST_CHECK_EQUAL(ublas::product_v, 24); + BOOST_CHECK_EQUAL(ublas::product_v, 24); } -struct fixture { - fixture() = default; - extents<> e0{}; // 0 - extents<1, 2, 3, 4> e1{}; // 1 - extents<1, 2, 3> e2{}; // 2 - extents<4, 2, 3> e3{}; // 3 - extents<4, 2, 1, 3> e4{}; // 4 - extents<1, 4, 2, 1, 3, 1> e5{}; // 5 +BOOST_FIXTURE_TEST_CASE(test_static_extents_access, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("access")) +{ + namespace ublas = boost::numeric::ublas; - std::tuple< - extents<> - > rank_0_extents; + BOOST_REQUIRE_EQUAL( ublas::size_v,0); + BOOST_REQUIRE_EQUAL( ublas::size_v,1); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,2); + BOOST_REQUIRE_EQUAL( ublas::size_v,3); + BOOST_REQUIRE_EQUAL( ublas::size_v,3); + BOOST_REQUIRE_EQUAL( ublas::size_v,3); + BOOST_REQUIRE_EQUAL( ublas::size_v,4); + BOOST_REQUIRE_EQUAL( ublas::size_v,4); + BOOST_REQUIRE_EQUAL( ublas::size_v,6); - std::tuple< - extents<1>, - extents<2> - > rank_1_extents; + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + 
BOOST_CHECK_EQUAL((ublas::get_v), 1); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + BOOST_CHECK_EQUAL((ublas::get_v), 4); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + BOOST_CHECK_EQUAL((ublas::get_v), 4); + + BOOST_CHECK_EQUAL((ublas::get_v), 4); + BOOST_CHECK_EQUAL((ublas::get_v), 2); + BOOST_CHECK_EQUAL((ublas::get_v), 1); + BOOST_CHECK_EQUAL((ublas::get_v), 3); + + //FIXME: BOOST_CHECK_EQUAL(e1 [0], 1); + + BOOST_CHECK_EQUAL(e11[0], 1); + BOOST_CHECK_EQUAL(e11[1], 1); + + BOOST_CHECK_EQUAL(e12[0], 1); + BOOST_CHECK_EQUAL(e12[1], 2); + + BOOST_CHECK_EQUAL(e21[0], 2); + BOOST_CHECK_EQUAL(e21[1], 1); + + BOOST_CHECK_EQUAL(e23[0], 2); + BOOST_CHECK_EQUAL(e23[1], 3); + + BOOST_CHECK_EQUAL(e211[0], 2); + BOOST_CHECK_EQUAL(e211[1], 1); + BOOST_CHECK_EQUAL(e211[2], 1); + + BOOST_CHECK_EQUAL(e123[0], 1); + BOOST_CHECK_EQUAL(e123[1], 2); + BOOST_CHECK_EQUAL(e123[2], 3); + + BOOST_CHECK_EQUAL(e423[0], 4); + BOOST_CHECK_EQUAL(e423[1], 2); + BOOST_CHECK_EQUAL(e423[2], 3); + + BOOST_CHECK_EQUAL(e1234[0], 1); + BOOST_CHECK_EQUAL(e1234[1], 2); + BOOST_CHECK_EQUAL(e1234[2], 3); + BOOST_CHECK_EQUAL(e1234[3], 4); + + BOOST_CHECK_EQUAL(e4213[0], 4); + BOOST_CHECK_EQUAL(e4213[1], 2); + BOOST_CHECK_EQUAL(e4213[2], 1); + BOOST_CHECK_EQUAL(e4213[3], 3); +} + +struct fixture_second +{ + template + using extents = boost::numeric::ublas::extents; std::tuple< - extents<1,1>, - extents<2,2> - > rank_2_extents; + extents<> + > empty; std::tuple< - extents<1>, + //FIXME: extents<1>, extents<1,1>, extents<1,1,1>, extents<1,1,1,1> - > scalars; + > scalars; std::tuple< extents<1,2>, - extents<1,3,1>, + extents<2,1>, + extents<1,2,1>, + extents<2,1,1>, extents<1,4,1,1>, - extents<5,1,1,1,1>, - extents<6,1,1,1,1,1> - > vectors; + extents<5,1,1,1,1> + > vectors; 
std::tuple< extents<2,3>, extents<3,2,1>, extents<4,4,1,1>, extents<6,6,1,1,1,1> - > matrices; + > matrices; std::tuple< - extents<3,3,3>, - extents<4,4,4,1>, - extents<5,5,5,1,1>, + extents<1,2,3>, + extents<1,2,3>, + extents<1,2,3,1>, + extents<4,2,3>, + extents<4,2,3,1>, + extents<4,2,3,1,1>, extents<6,6,6,1,1,1>, extents<6,6,1,1,1,6> - > tensors; + > tensors; }; -BOOST_FIXTURE_TEST_CASE(test_static_extents_product, fixture, - *boost::unit_test::label("static_extents") * - boost::unit_test::label("product")) { - - using namespace boost::numeric::ublas; - - auto p0 = product(e0); // {} - auto p1 = product(e1); // {1,2,3,4} - auto p2 = product(e2); // {1,2,3} - auto p3 = product(e3); // {4,2,3} - auto p4 = product(e4); // {4,2,1,3} - auto p5 = product(e5); // {1,4,2,1,3,1} - - auto sp0 = product(e0); // {} - auto sp1 = product(e1); // {1,2,3,4} - auto sp2 = product(e2); // {1,2,3} - auto sp3 = product(e3); // {4,2,3} - auto sp4 = product(e4); // {4,2,1,3} - auto sp5 = product(e5); // {1,4,2,1,3,1} - - BOOST_CHECK_EQUAL(p0, 0); - BOOST_CHECK_EQUAL(p1, 24); - BOOST_CHECK_EQUAL(p2, 6); - BOOST_CHECK_EQUAL(p3, 24); - BOOST_CHECK_EQUAL(p4, 24); - BOOST_CHECK_EQUAL(p5, 24); - - BOOST_CHECK_EQUAL(sp0, 0); - BOOST_CHECK_EQUAL(sp1, 24); - BOOST_CHECK_EQUAL(sp2, 6); - BOOST_CHECK_EQUAL(sp3, 24); - BOOST_CHECK_EQUAL(sp4, 24); - BOOST_CHECK_EQUAL(sp5, 24); -} -BOOST_FIXTURE_TEST_CASE(test_static_extents_access, fixture, - *boost::unit_test::label("static_extents") * - boost::unit_test::label("access")) { - using namespace boost::numeric; - - BOOST_CHECK_EQUAL(e0.size(), 0); - BOOST_CHECK(e0.empty()); - - BOOST_REQUIRE_EQUAL(e1.size(), 4); - BOOST_REQUIRE_EQUAL(e2.size(), 3); - BOOST_REQUIRE_EQUAL(e3.size(), 3); - BOOST_REQUIRE_EQUAL(e4.size(), 4); - BOOST_REQUIRE_EQUAL(e5.size(), 6); - - BOOST_CHECK_EQUAL(e1[0], 1); - BOOST_CHECK_EQUAL(e1[1], 2); - BOOST_CHECK_EQUAL(e1[2], 3); - BOOST_CHECK_EQUAL(e1[3], 4); - - BOOST_CHECK_EQUAL(e2[0], 1); - BOOST_CHECK_EQUAL(e2[1], 2); - 
BOOST_CHECK_EQUAL(e2[2], 3); - - BOOST_CHECK_EQUAL(e3[0], 4); - BOOST_CHECK_EQUAL(e3[1], 2); - BOOST_CHECK_EQUAL(e3[2], 3); - - BOOST_CHECK_EQUAL(e4[0], 4); - BOOST_CHECK_EQUAL(e4[1], 2); - BOOST_CHECK_EQUAL(e4[2], 1); - BOOST_CHECK_EQUAL(e4[3], 3); - - BOOST_CHECK_EQUAL(e5[0], 1); - BOOST_CHECK_EQUAL(e5[1], 4); - BOOST_CHECK_EQUAL(e5[2], 2); - BOOST_CHECK_EQUAL(e5[3], 1); - BOOST_CHECK_EQUAL(e5[4], 3); - BOOST_CHECK_EQUAL(e5[5], 1); -} +BOOST_FIXTURE_TEST_CASE(test_static_extents, fixture_second, + *boost::unit_test::label("extents_static") *boost::unit_test::label("is_scalar_vector_matrix_tensor")) { -BOOST_FIXTURE_TEST_CASE(test_static_extents, fixture, - *boost::unit_test::label("static_extents") * - boost::unit_test::label("query")) { - - using namespace boost::numeric::ublas; - // e0 ==> {} - // e1 ==> {0,0,0,0} - // e2 ==> {1,2,3} - // e3 ==> {4,2,3} - // e4 ==> {4,2,1,3} - // e5 ==> {1,4,2,1,3,1} - - BOOST_CHECK( e0.empty( )); - BOOST_CHECK( !is_scalar( e0)); - BOOST_CHECK( !is_vector( e0)); - BOOST_CHECK( !is_matrix( e0)); - BOOST_CHECK( !is_tensor( e0)); - BOOST_CHECK( !is_scalar( e0)); - BOOST_CHECK( !is_vector( e0)); - BOOST_CHECK( !is_matrix( e0)); - BOOST_CHECK( !is_tensor( e0)); - - BOOST_CHECK( ! e1.empty( ) ); - BOOST_CHECK( !is_scalar( e1) ); - BOOST_CHECK( !is_vector( e1) ); - BOOST_CHECK( !is_matrix( e1) ); - BOOST_CHECK( is_tensor( e1) ); - BOOST_CHECK( !is_scalar( e1) ); - BOOST_CHECK( !is_vector( e1) ); - BOOST_CHECK( !is_matrix( e1) ); - BOOST_CHECK( is_tensor( e1) ); - - BOOST_CHECK( ! e2.empty( ) ); - BOOST_CHECK( !is_scalar( e2) ); - BOOST_CHECK( !is_vector( e2) ); - BOOST_CHECK( !is_matrix( e2) ); - BOOST_CHECK( is_tensor( e2) ); - BOOST_CHECK( !is_scalar( e2) ); - BOOST_CHECK( !is_vector( e2) ); - BOOST_CHECK( !is_matrix( e2) ); - BOOST_CHECK( is_tensor( e2) ); - - BOOST_CHECK( ! 
e3.empty( ) ); - BOOST_CHECK( !is_scalar( e3) ); - BOOST_CHECK( !is_vector( e3) ); - BOOST_CHECK( !is_matrix( e3) ); - BOOST_CHECK( is_tensor( e3) ); - BOOST_CHECK( !is_scalar( e3) ); - BOOST_CHECK( !is_vector( e3) ); - BOOST_CHECK( !is_matrix( e3) ); - BOOST_CHECK( is_tensor( e3) ); - - BOOST_CHECK( ! e4.empty( ) ); - BOOST_CHECK( !is_scalar( e4) ); - BOOST_CHECK( !is_vector( e4) ); - BOOST_CHECK( !is_matrix( e4) ); - BOOST_CHECK( is_tensor( e4) ); - BOOST_CHECK( !is_scalar( e4) ); - BOOST_CHECK( !is_vector( e4) ); - BOOST_CHECK( !is_matrix( e4) ); - BOOST_CHECK( is_tensor( e4) ); - - BOOST_CHECK( ! e5.empty( ) ); - BOOST_CHECK( !is_scalar( e5) ); - BOOST_CHECK( !is_vector( e5) ); - BOOST_CHECK( !is_matrix( e5) ); - BOOST_CHECK( is_tensor( e5) ); - BOOST_CHECK( !is_scalar( e5) ); - BOOST_CHECK( !is_vector( e5) ); - BOOST_CHECK( !is_matrix( e5) ); - BOOST_CHECK( is_tensor( e5) ); - - boost::numeric::ublas::basic_static_extents e14; - BOOST_CHECK( ! e14.empty( ) ); - BOOST_CHECK( ! is_scalar(e14) ); - BOOST_CHECK( is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! is_tensor(e14) ); - BOOST_CHECK( ! is_scalar(e14) ); - BOOST_CHECK( is_vector(e14) ); - BOOST_CHECK( ! is_matrix(e14) ); - BOOST_CHECK( ! 
is_tensor(e14) ); - - - for_each_tuple(rank_0_extents,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }); + namespace ublas = boost::numeric::ublas; + for_each_in_tuple(scalars,[](auto const& /*unused*/, auto const& e){ + BOOST_CHECK( ublas::is_scalar(e) ); + BOOST_CHECK( ublas::is_vector(e) ); + BOOST_CHECK( ublas::is_matrix(e) ); + BOOST_CHECK( !ublas::is_tensor(e) ); - for_each_tuple(rank_1_extents,[](auto const& I, auto const& e){ - if( I == 0 ){ - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }else{ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - } - }); + BOOST_CHECK( ublas::is_scalar_v); + BOOST_CHECK( ublas::is_vector_v); + BOOST_CHECK( ublas::is_matrix_v); + BOOST_CHECK( !ublas::is_tensor_v); - for_each_tuple(rank_2_extents,[](auto const& I, auto const& e){ - if( I == 0 ){ - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }else{ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - } }); - 
for_each_tuple(scalars,[](auto const&, auto& e){ - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - }); + for_each_in_tuple(vectors,[](auto const& /*unused*/, auto& e){ + BOOST_CHECK( !ublas::is_scalar(e) ); + BOOST_CHECK( ublas::is_vector(e) ); + BOOST_CHECK( ublas::is_matrix(e) ); + BOOST_CHECK( !ublas::is_tensor(e) ); - for_each_tuple(vectors,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); + BOOST_CHECK( !ublas::is_scalar_v); + BOOST_CHECK( ublas::is_vector_v); + BOOST_CHECK( ublas::is_matrix_v); + BOOST_CHECK( !ublas::is_tensor_v); }); - for_each_tuple(matrices,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( is_matrix(e) ); - BOOST_CHECK( !is_tensor(e) ); - - }); + for_each_in_tuple(matrices,[](auto const& /*unused*/, auto& e){ + BOOST_CHECK( !ublas::is_scalar(e) ); + BOOST_CHECK( !ublas::is_vector(e) ); + BOOST_CHECK( ublas::is_matrix(e) ); + BOOST_CHECK( !ublas::is_tensor(e) ); - for_each_tuple(tensors,[](auto const&, auto& e){ - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( is_tensor(e) ); - BOOST_CHECK( !is_scalar(e) ); - BOOST_CHECK( !is_vector(e) ); - BOOST_CHECK( !is_matrix(e) ); - BOOST_CHECK( is_tensor(e) ); + BOOST_CHECK( !ublas::is_scalar_v); + BOOST_CHECK( !ublas::is_vector_v); + BOOST_CHECK( ublas::is_matrix_v); + BOOST_CHECK( !ublas::is_tensor_v); }); -} + 
for_each_in_tuple(tensors,[](auto const& /*unused*/, auto& e){ + BOOST_CHECK( !ublas::is_scalar(e) ); + BOOST_CHECK( !ublas::is_vector(e) ); + BOOST_CHECK( !ublas::is_matrix(e) ); + BOOST_CHECK( ublas::is_tensor(e) ); -BOOST_FIXTURE_TEST_CASE(test_static_extents_valid, fixture, *boost::unit_test::label("static_extents") *boost::unit_test::label("valid")) -{ - using namespace boost::numeric::ublas; - for_each_tuple(rank_0_extents,[](auto const&, auto& e){ - BOOST_CHECK(!is_valid(e)); - BOOST_CHECK(!is_valid(e)); + BOOST_CHECK( !ublas::is_scalar_v); + BOOST_CHECK( !ublas::is_vector_v); + BOOST_CHECK( !ublas::is_matrix_v); + BOOST_CHECK( ublas::is_tensor_v); }); - for_each_tuple(rank_1_extents,[](auto const& I, auto const& e){ - if( I== 0 ){ - BOOST_CHECK(is_valid(e)); - }else{ - BOOST_CHECK(!is_valid(e)); - BOOST_CHECK(!is_valid(e)); - } - }); - - for_each_tuple(rank_2_extents,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(scalars,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(vectors,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(matrices,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); - - for_each_tuple(tensors,[](auto const&, auto& e){ - BOOST_CHECK(is_valid(e)); - BOOST_CHECK(is_valid(e)); - }); } -BOOST_FIXTURE_TEST_CASE(test_static_extents_comparsion_operator, fixture, *boost::unit_test::label("static_extents") *boost::unit_test::label("comparsion_operator")) +BOOST_FIXTURE_TEST_CASE(test_static_extents_valid, fixture_second, + *boost::unit_test::label("extents_extents") *boost::unit_test::label("valid")) { + namespace ublas = boost::numeric::ublas; - auto const compare_extents = [](auto const& e1, auto const& e2){ - if(e1.size() != e2.size()) return false; - for(auto i = 0ul ; i < e1.size(); i++){ - if(e1[i] != e2[i]){ - return false; - } - } - 
return true; - }; - - for_each_tuple(rank_0_extents,[&](auto const&, auto const& e1){ - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); +//FIXME: BOOST_CHECK(!ublas::is_valid (extents<0>{}) ); +//FIXME: BOOST_CHECK( ublas::is_valid (extents<2>{}) ); +//FIXME: BOOST_CHECK( ublas::is_valid (extents<3>{}) ); - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e1){ - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); + BOOST_CHECK(!ublas::is_valid_v> ); + BOOST_CHECK( ublas::is_valid_v> ); + BOOST_CHECK( ublas::is_valid_v> ); - for_each_tuple(rank_1_extents,[&](auto const&, auto const& e1){ - for_each_tuple(rank_2_extents,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); - - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(scalars,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); - - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(vectors,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(matrices,[&](auto const&, auto const& e2){ - BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); + for_each_in_tuple(scalars ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); + for_each_in_tuple(vectors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); + for_each_in_tuple(matrices ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); + for_each_in_tuple(tensors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid (e) ); }); - for_each_tuple(scalars,[&](auto const&, auto const& e1){ - for_each_tuple(tensors,[&](auto const&, auto const& e2){ - 
BOOST_CHECK(compare_extents(e1,e2) == (e1 == e2)); - }); - }); + for_each_in_tuple(scalars ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); + for_each_in_tuple(vectors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); + for_each_in_tuple(matrices ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); + for_each_in_tuple(tensors ,[](auto const& /*unused*/, auto& e){ BOOST_CHECK( ublas::is_valid_v ); }); } -BOOST_FIXTURE_TEST_CASE(test_static_extents_squeeze, fixture, *boost::unit_test::label("static_extents") *boost::unit_test::label("squeeze")) -{ - auto e_sq2 = squeeze(e2 ) ;//==> {2,3} - auto e_sq3 = squeeze(e3 ) ;//==> {4,2,3} - auto e_sq4 = squeeze(e4 ) ;//==> {4,2,3} - auto e_sq5 = squeeze(e5 ) ;//==> {4,2,3} - - BOOST_CHECK( (e_sq2 == extents<2,3>{}) ); - BOOST_CHECK( (e_sq3 == extents<4,2,3>{}) ); - - BOOST_CHECK( (e_sq4 == extents<4,2,3>{}) ); - BOOST_CHECK( (e_sq5 == extents<4,2,3>{}) ); -} - -BOOST_AUTO_TEST_CASE(test_static_extents_exception) +BOOST_FIXTURE_TEST_CASE(test_static_extents_comparsion_operator, fixture, + *boost::unit_test::label("extents_static") *boost::unit_test::label("equals")) { - using namespace boost::numeric::ublas; - - basic_static_extents e1; - for(auto i = e1.size(); i < 3; i++){ - BOOST_REQUIRE_THROW( (void)e1.at(i),std::out_of_range ); - } - - BOOST_REQUIRE_THROW((void)e1.at(std::numeric_limits::max()),std::out_of_range); + namespace ublas = boost::numeric::ublas; + + BOOST_CHECK( e0 == e0 ); + BOOST_CHECK( e1 == e1 ); + BOOST_CHECK( e11 == e11 ); + BOOST_CHECK( e21 == e21 ); + BOOST_CHECK( e12 == e12 ); + BOOST_CHECK( e23 == e23 ); + BOOST_CHECK( e231 == e231 ); + BOOST_CHECK( e211 == e211 ); + BOOST_CHECK( e123 == e123 ); + BOOST_CHECK( e423 == e423 ); + BOOST_CHECK( e1234 == e1234 ); + BOOST_CHECK( e4213 == e4213 ); + BOOST_CHECK( e142131 == e142131 ); } diff --git a/test/tensor/test_static_operators_arithmetic.cpp 
b/test/tensor/test_static_operators_arithmetic.cpp index 97a75cc5d..71e1447ad 100644 --- a/test/tensor/test_static_operators_arithmetic.cpp +++ b/test/tensor/test_static_operators_arithmetic.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -18,7 +18,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_static_tensor_arithmetic_operations) +BOOST_AUTO_TEST_SUITE(test_tensor_static_arithmetic_operations) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -27,9 +27,9 @@ using test_types = zip::with_t - using extents_type = boost::numeric::ublas::static_extents; + using extents_type = boost::numeric::ublas::extents; - fixture() {} + fixture() = default; std::tuple< extents_type<1,1>, // 1 @@ -44,15 +44,15 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto r = tensor_type (); @@ -99,22 +99,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, valu BOOST_CHECK_EQUAL ( r(i), 1 ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) { - 
using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto v = value_type {}; @@ -154,7 +154,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } @@ -163,15 +163,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto r = tensor_type (); @@ -235,7 +235,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, valu BOOST_CHECK_EQUAL ( p(i), r(i) ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } diff --git a/test/tensor/test_static_operators_comparison.cpp b/test/tensor/test_static_operators_comparison.cpp index 045cea23a..9010482d0 100644 --- a/test/tensor/test_static_operators_comparison.cpp +++ b/test/tensor/test_static_operators_comparison.cpp @@ -17,17 +17,18 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_static_tensor_comparison) +BOOST_AUTO_TEST_SUITE(test_tensor_static_comparison) using double_extended = 
boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { + template - using extents_type = boost::numeric::ublas::static_extents; + using extents_type = boost::numeric::ublas::extents; - fixture() {} + fixture()= default; std::tuple< extents_type<1,1>, // 1 @@ -41,14 +42,14 @@ struct fixture { BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); auto v = value_type {}; @@ -74,22 +75,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fi BOOST_CHECK( t2 >= t ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type (); auto t2 = tensor_type (); @@ -120,7 +121,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } @@ -128,20 +129,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, 
test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; BOOST_CHECK( tensor_type(value_type{2}) == tensor_type(value_type{2}) ); BOOST_CHECK( tensor_type(value_type{2}) != tensor_type(value_type{1}) ); - if(e.empty()) + if(ublas::empty(e)) return; BOOST_CHECK( !(tensor_type(2) < 2) ); @@ -189,9 +190,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, te BOOST_CHECK( ( 5 == tensor_type(2)+tensor_type(3)) ); BOOST_CHECK( ( 6 != tensor_type(2)+tensor_type(3)) ); - }; - - for_each_tuple(extents,check); + }); } diff --git a/test/tensor/test_static_strides.cpp b/test/tensor/test_static_strides.cpp index 72b11c58c..5f5a203e9 100644 --- a/test/tensor/test_static_strides.cpp +++ b/test/tensor/test_static_strides.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -10,10 +10,7 @@ // Google and Fraunhofer IOSB, Ettlingen, Germany // -#include -#include -#include -#include +#include #include BOOST_AUTO_TEST_SUITE(test_static_strides) @@ -21,137 +18,139 @@ BOOST_AUTO_TEST_SUITE(test_static_strides) using test_types = std::tuple; -template -using extents_type = boost::numeric::ublas::basic_static_extents; -template -using strides_type = boost::numeric::ublas::strides_t; +template +using extents = boost::numeric::ublas::extents; + +using first_order = boost::numeric::ublas::layout::first_order; +using last_order = boost::numeric::ublas::layout::last_order; + +BOOST_AUTO_TEST_CASE_TEMPLATE(test_static_strides_ctor, value, test_types) +{ + namespace ublas = boost::numeric::ublas; + + constexpr auto s11 = ublas::to_strides_v,first_order>; + constexpr auto s12 = ublas::to_strides_v,first_order>; + constexpr auto s21 = ublas::to_strides_v,first_order>; + constexpr auto s23 = ublas::to_strides_v,first_order>; + constexpr auto s231 = ublas::to_strides_v,first_order>; + constexpr auto s123 = ublas::to_strides_v,first_order>; + constexpr auto s423 = ublas::to_strides_v,first_order>; + + BOOST_CHECK_EQUAL(s11.empty(), false); + BOOST_CHECK_EQUAL(s12.empty(), false); + BOOST_CHECK_EQUAL(s21.empty(), false); + BOOST_CHECK_EQUAL(s23.empty(), false); + BOOST_CHECK_EQUAL(s231.empty(), false); + BOOST_CHECK_EQUAL(s123.empty(), false); + BOOST_CHECK_EQUAL(s423.empty(), false); + + BOOST_CHECK_EQUAL(s11.size(), 2); + BOOST_CHECK_EQUAL(s12.size(), 2); + BOOST_CHECK_EQUAL(s21.size(), 2); + BOOST_CHECK_EQUAL(s23.size(), 2); + BOOST_CHECK_EQUAL(s231.size(), 3); + BOOST_CHECK_EQUAL(s123.size(), 3); + BOOST_CHECK_EQUAL(s423.size(), 3); +} -BOOST_AUTO_TEST_CASE_TEMPLATE(test_static_strides_ctor, value, test_types) { - using namespace boost::numeric; +BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_first_order) +{ + namespace ublas = boost::numeric::ublas; - strides_type, 
ublas::layout::first_order> s1{}; - BOOST_CHECK_EQUAL(s1.empty(), false); - BOOST_CHECK_EQUAL(s1.size(), 2); + constexpr auto s11 = ublas::to_strides_v,first_order>; + constexpr auto s12 = ublas::to_strides_v,first_order>; + constexpr auto s21 = ublas::to_strides_v,first_order>; + constexpr auto s23 = ublas::to_strides_v,first_order>; + constexpr auto s231 = ublas::to_strides_v,first_order>; + constexpr auto s213 = ublas::to_strides_v,first_order>; + constexpr auto s123 = ublas::to_strides_v,first_order>; + constexpr auto s423 = ublas::to_strides_v,first_order>; - strides_type, ublas::layout::first_order> s2{}; - BOOST_CHECK_EQUAL(s2.empty(), false); - BOOST_CHECK_EQUAL(s2.size(), 2); + BOOST_REQUIRE_EQUAL(s11.size(), 2); + BOOST_REQUIRE_EQUAL(s12.size(), 2); + BOOST_REQUIRE_EQUAL(s21.size(), 2); + BOOST_REQUIRE_EQUAL(s23.size(), 2); + BOOST_REQUIRE_EQUAL(s231.size(), 3); + BOOST_REQUIRE_EQUAL(s213.size(), 3); + BOOST_REQUIRE_EQUAL(s123.size(), 3); + BOOST_REQUIRE_EQUAL(s423.size(), 3); - strides_type, ublas::layout::first_order> s3{}; - BOOST_CHECK_EQUAL(s3.empty(), false); - BOOST_CHECK_EQUAL(s3.size(), 2); - strides_type, ublas::layout::first_order> s4{}; - BOOST_CHECK_EQUAL(s4.empty(), false); - BOOST_CHECK_EQUAL(s4.size(), 2); + BOOST_CHECK_EQUAL(s11[0], 1); + BOOST_CHECK_EQUAL(s11[1], 1); - strides_type, ublas::layout::first_order> s5{}; - BOOST_CHECK_EQUAL(s5.empty(), false); - BOOST_CHECK_EQUAL(s5.size(), 3); + BOOST_CHECK_EQUAL(s12[0], 1); + BOOST_CHECK_EQUAL(s12[1], 1); - strides_type, ublas::layout::first_order> s6{}; - BOOST_CHECK_EQUAL(s6.empty(), false); - BOOST_CHECK_EQUAL(s6.size(), 3); + BOOST_CHECK_EQUAL(s21[0], 1); + BOOST_CHECK_EQUAL(s21[1], 2); // NOTE: is this the way we want to have it? 
- strides_type, ublas::layout::first_order> s7{}; - BOOST_CHECK_EQUAL(s7.empty(), false); - BOOST_CHECK_EQUAL(s7.size(), 3); -} + BOOST_CHECK_EQUAL(s23[0], 1); + BOOST_CHECK_EQUAL(s23[1], 2); + + BOOST_CHECK_EQUAL(s231[0], 1); + BOOST_CHECK_EQUAL(s231[1], 2); + BOOST_CHECK_EQUAL(s231[2], 6); + + BOOST_CHECK_EQUAL(s123[0], 1); + BOOST_CHECK_EQUAL(s123[1], 1); + BOOST_CHECK_EQUAL(s123[2], 2); -BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_first_order) { - using namespace boost::numeric; - - strides_type, ublas::layout::first_order> s1{}; - BOOST_REQUIRE_EQUAL(s1.size(), 2); - BOOST_CHECK_EQUAL(s1[0], 1); - BOOST_CHECK_EQUAL(s1[1], 1); - - strides_type, ublas::layout::first_order> s2{}; - BOOST_REQUIRE_EQUAL(s2.size(), 2); - BOOST_CHECK_EQUAL(s2[0], 1); - BOOST_CHECK_EQUAL(s2[1], 1); - - strides_type, ublas::layout::first_order> s3{}; - BOOST_REQUIRE_EQUAL(s3.size(), 2); - BOOST_CHECK_EQUAL(s3[0], 1); - BOOST_CHECK_EQUAL(s3[1], 1); - - strides_type, ublas::layout::first_order> s4{}; - BOOST_REQUIRE_EQUAL(s4.size(), 2); - BOOST_CHECK_EQUAL(s4[0], 1); - BOOST_CHECK_EQUAL(s4[1], 2); - - strides_type, ublas::layout::first_order> s5{}; - BOOST_REQUIRE_EQUAL(s5.size(), 3); - BOOST_CHECK_EQUAL(s5[0], 1); - BOOST_CHECK_EQUAL(s5[1], 2); - BOOST_CHECK_EQUAL(s5[2], 6); - - strides_type, ublas::layout::first_order> s6{}; - BOOST_REQUIRE_EQUAL(s6.size(), 3); - BOOST_CHECK_EQUAL(s6[0], 1); - BOOST_CHECK_EQUAL(s6[1], 1); - BOOST_CHECK_EQUAL(s6[2], 2); - - strides_type, ublas::layout::first_order> s7{}; - BOOST_REQUIRE_EQUAL(s7.size(), 3); - BOOST_CHECK_EQUAL(s7[0], 1); - BOOST_CHECK_EQUAL(s7[1], 2); - BOOST_CHECK_EQUAL(s7[2], 2); - - strides_type, ublas::layout::first_order> s8{}; - BOOST_REQUIRE_EQUAL(s8.size(), 3); - BOOST_CHECK_EQUAL(s8[0], 1); - BOOST_CHECK_EQUAL(s8[1], 4); - BOOST_CHECK_EQUAL(s8[2], 8); + BOOST_CHECK_EQUAL(s213[0], 1); + BOOST_CHECK_EQUAL(s213[1], 2); + BOOST_CHECK_EQUAL(s213[2], 2); + + BOOST_CHECK_EQUAL(s423[0], 1); + BOOST_CHECK_EQUAL(s423[1], 4); + 
BOOST_CHECK_EQUAL(s423[2], 8); } -BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_last_order) { - using namespace boost::numeric; - - strides_type, ublas::layout::last_order> s1{}; - BOOST_REQUIRE_EQUAL(s1.size(), 2); - BOOST_CHECK_EQUAL(s1[0], 1); - BOOST_CHECK_EQUAL(s1[1], 1); - - strides_type, ublas::layout::last_order> s2{}; - BOOST_REQUIRE_EQUAL(s2.size(), 2); - BOOST_CHECK_EQUAL(s2[0], 1); - BOOST_CHECK_EQUAL(s2[1], 1); - - strides_type, ublas::layout::last_order> s3{}; - BOOST_REQUIRE_EQUAL(s3.size(), 2); - BOOST_CHECK_EQUAL(s3[0], 1); - BOOST_CHECK_EQUAL(s3[1], 1); - - strides_type, ublas::layout::last_order> s4{}; - BOOST_REQUIRE_EQUAL(s4.size(), 2); - BOOST_CHECK_EQUAL(s4[0], 3); - BOOST_CHECK_EQUAL(s4[1], 1); - - strides_type, ublas::layout::last_order> s5{}; - BOOST_REQUIRE_EQUAL(s5.size(), 3); - BOOST_CHECK_EQUAL(s5[0], 3); - BOOST_CHECK_EQUAL(s5[1], 1); - BOOST_CHECK_EQUAL(s5[2], 1); - - strides_type, ublas::layout::last_order> s6{}; - BOOST_REQUIRE_EQUAL(s6.size(), 3); - BOOST_CHECK_EQUAL(s6[0], 6); - BOOST_CHECK_EQUAL(s6[1], 3); - BOOST_CHECK_EQUAL(s6[2], 1); - - strides_type, ublas::layout::last_order> s7{}; - BOOST_REQUIRE_EQUAL(s7.size(), 3); - BOOST_CHECK_EQUAL(s7[0], 3); - BOOST_CHECK_EQUAL(s7[1], 3); - BOOST_CHECK_EQUAL(s7[2], 1); - - strides_type, ublas::layout::last_order> s8{}; - BOOST_REQUIRE_EQUAL(s8.size(), 3); - BOOST_CHECK_EQUAL(s8[0], 6); - BOOST_CHECK_EQUAL(s8[1], 3); - BOOST_CHECK_EQUAL(s8[2], 1); +BOOST_AUTO_TEST_CASE(test_static_strides_ctor_access_last_order) +{ + namespace ublas = boost::numeric::ublas; + + constexpr auto s11 = ublas::to_strides_v,last_order>; + constexpr auto s12 = ublas::to_strides_v,last_order>; + constexpr auto s21 = ublas::to_strides_v,last_order>; + constexpr auto s23 = ublas::to_strides_v,last_order>; + constexpr auto s231 = ublas::to_strides_v,last_order>; + constexpr auto s213 = ublas::to_strides_v,last_order>; + constexpr auto s123 = ublas::to_strides_v,last_order>; + constexpr auto s423 = 
ublas::to_strides_v,last_order>; + + BOOST_REQUIRE_EQUAL(s11.size(), 2); + BOOST_REQUIRE_EQUAL(s12.size(), 2); + BOOST_REQUIRE_EQUAL(s21.size(), 2); + BOOST_REQUIRE_EQUAL(s23.size(), 2); + BOOST_REQUIRE_EQUAL(s231.size(), 3); + BOOST_REQUIRE_EQUAL(s213.size(), 3); + BOOST_REQUIRE_EQUAL(s123.size(), 3); + BOOST_REQUIRE_EQUAL(s423.size(), 3); + + + BOOST_CHECK_EQUAL(s11[0], 1); + BOOST_CHECK_EQUAL(s11[1], 1); + + BOOST_CHECK_EQUAL(s12[0], 2); //NOTE: is this the way we want the stride to be computed? + BOOST_CHECK_EQUAL(s12[1], 1); + + BOOST_CHECK_EQUAL(s21[0], 1); + BOOST_CHECK_EQUAL(s21[1], 1); + + BOOST_CHECK_EQUAL(s23[0], 3); + BOOST_CHECK_EQUAL(s23[1], 1); + + BOOST_CHECK_EQUAL(s231[0], 3); + BOOST_CHECK_EQUAL(s231[1], 1); + BOOST_CHECK_EQUAL(s231[2], 1); + + BOOST_CHECK_EQUAL(s123[0], 6); + BOOST_CHECK_EQUAL(s123[1], 3); + BOOST_CHECK_EQUAL(s123[2], 1); + + BOOST_CHECK_EQUAL(s213[0], 3); + BOOST_CHECK_EQUAL(s213[1], 3); + BOOST_CHECK_EQUAL(s213[2], 1); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_static_tensor.cpp b/test/tensor/test_static_tensor.cpp index 1fef8617d..653a2726b 100644 --- a/test/tensor/test_static_tensor.cpp +++ b/test/tensor/test_static_tensor.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -15,51 +15,45 @@ #include #include -//#ifndef BOOST_TEST_DYN_LINK -//#define BOOST_TEST_DYN_LINK -//#endif - -//#define BOOST_TEST_MODULE TestStaticTensor - #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE ( test_static_tensor ) +BOOST_AUTO_TEST_SUITE ( test_tensor_static ) using test_types = zip>::with_t; BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto a1 = ublas::static_tensor,layout_type>{}; - BOOST_CHECK_EQUAL( a1.size() , 0ul ); - BOOST_CHECK( a1.empty() ); +// auto a1 = ublas::tensor_static,layout_type>{}; +// BOOST_CHECK_EQUAL( a1.size() , 0ul ); +// BOOST_CHECK( a1.empty() ); - auto a2 = ublas::static_tensor,layout_type>{}; + auto a2 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); - auto a3 = ublas::static_tensor,layout_type>{}; + auto a3 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); - auto a4 = ublas::static_tensor,layout_type>{}; + auto a4 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); - auto a5 = ublas::static_tensor,layout_type>{}; + auto a5 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a5.size() , 2 ); BOOST_CHECK( !a5.empty() ); - auto a6 = ublas::static_tensor,layout_type>{}; + auto a6 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a6.size() , 4*3*2 ); BOOST_CHECK( !a6.empty() ); - auto a7 = ublas::static_tensor,layout_type>{}; + auto a7 = ublas::tensor_static,layout_type>{}; BOOST_CHECK_EQUAL( a7.size() , 4*1*2 ); BOOST_CHECK( !a7.empty() ); @@ -69,9 +63,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_ctor, value, test_types) struct fixture { template - using extents_type = 
boost::numeric::ublas::static_extents; + using extents_type = boost::numeric::ublas::extents; - fixture() {} + fixture()=default; std::tuple< extents_type<1,1>, // 1 @@ -85,17 +79,17 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - auto t = ublas::static_tensor{}; + auto t = ublas::tensor_static{}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - if(e.empty()) { + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); } else{ @@ -108,14 +102,14 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - auto r = ublas::static_tensor{0}; + auto r = ublas::tensor_static{0}; auto t = r; BOOST_CHECK_EQUAL ( t.size() , r.size() ); @@ -123,7 +117,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_CHECK ( t.strides() == r.strides() ); BOOST_CHECK ( t.extents() == r.extents() ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); } else{ @@ -139,17 +133,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; using other_layout_type = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto r = tensor_type{0}; - ublas::static_tensor t = r; + ublas::tensor_static t = r; tensor_type q = t; BOOST_CHECK_EQUAL ( t.size() , r.size() ); @@ -170,20 +164,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_typ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - auto check = [](auto const&, auto& e) + auto check = [](auto const& /*unused*/, auto& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto r = tensor_type{}; auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); } else{ @@ -192,13 +186,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = 
boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; @@ -208,9 +202,9 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; auto distribution = distribution_type(1,6); - for_each_tuple(extents, [&](auto const&, auto const& e){ + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto r = value_type( static_cast< inner_type_t >(distribution(generator)) ); auto t = tensor_type{r}; @@ -224,23 +218,23 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; - using array_type = typename tensor_type::array_type; + using tensor_type = ublas::tensor_static; + using container_type = typename tensor_type::container_type; - auto a = array_type(); + auto a = container_type(); auto v = value_type {}; for(auto& aa : a){ aa = v; v += value_type{1}; } - auto t = tensor_type{a}; + auto t = tensor_type(a); v = value_type{}; for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) @@ -253,13 +247,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = 
boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents, [](auto const&, auto& e){ + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type{}; auto v = value_type {}; @@ -278,7 +272,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, va BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; @@ -293,7 +287,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check2 = [](const auto& t) { - std::array k; + std::array k = {0,0}; auto r = std::is_same::value ? 1 : 0; auto q = std::is_same::value ? 1 : 0; auto v = value_type{}; @@ -307,7 +301,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check3 = [](const auto& t) { - std::array k; + std::array k= {0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 2 : 0; auto o = op_type{}; @@ -324,7 +318,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check4 = [](const auto& t) { - std::array k; + std::array k= {0,0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 
3 : 0; auto o = op_type{}; @@ -341,9 +335,9 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, } }; - auto check = [check1,check2,check3,check4](auto const&, auto const& e) { + auto check = [check1,check2,check3,check4](auto const& /*unused*/, auto const& e) { using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto t = tensor_type{}; auto v = value_type {}; for(auto i = 0ul; i < t.size(); ++i){ @@ -351,25 +345,25 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, v+=value_type{1}; } - if constexpr ( extents_type::_size == 1) check1(t); - else if constexpr ( extents_type::_size == 2) check2(t); - else if constexpr ( extents_type::_size == 3) check3(t); - else if constexpr ( extents_type::_size == 4) check4(t); + if constexpr ( std::tuple_size_v == 1) check1(t); + else if constexpr ( std::tuple_size_v == 2) check2(t); + else if constexpr ( std::tuple_size_v == 3) check3(t); + else if constexpr ( std::tuple_size_v == 4) check4(t); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - for_each_tuple(extents,[](auto const&, auto& e){ + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e){ using extents_type = std::decay_t; - using tensor_type = ublas::static_tensor; + using tensor_type = ublas::tensor_static; auto v = value_type {} + value_type{1}; auto t = tensor_type{v}; @@ -380,7 +374,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - if(t.size() > 0) { + 
if(!t.empty()) { BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; } @@ -388,17 +382,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::static_tensor, layout_type>; +//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) +//{ +// namespace ublas = boost::numeric::ublas; +// using value_type = typename value::first_type; +// using layout_type = typename value::second_type; +// using tensor_type = ublas::tensor_static, layout_type>; - auto t = tensor_type{}; - auto i = ublas::index::index_type<4>{}; - BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); +// auto t = tensor_type{}; +// auto i = ublas::index::index_type<4>{}; +// BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); -} +//} BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_static_tensor_matrix_vector.cpp b/test/tensor/test_static_tensor_matrix_vector.cpp index bce3749ad..22c2ba434 100644 --- a/test/tensor/test_static_tensor_matrix_vector.cpp +++ b/test/tensor/test_static_tensor_matrix_vector.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -21,58 +21,58 @@ -BOOST_AUTO_TEST_SUITE ( test_static_tensor_matrix_interoperability ) ; +BOOST_AUTO_TEST_SUITE ( test_tensor_static_matrix_interoperability ) using test_types = zip::with_t; -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; - ublas::static_tensor,layout_type> a2 = matrix_type(1,1); + ublas::tensor_static,layout> a2 = matrix(1,1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); - ublas::static_tensor,layout_type> a3 = matrix_type(2,1); + ublas::tensor_static,layout> a3 = matrix(2,1); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); - ublas::static_tensor,layout_type> a4 = matrix_type(1,2); + ublas::tensor_static,layout> a4 = matrix(1,2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); - ublas::static_tensor,layout_type> a5 = matrix_type(2,3); + ublas::tensor_static,layout> a5 = matrix(2,3); BOOST_CHECK_EQUAL( a5.size() , 6 ); BOOST_CHECK( !a5.empty() ); } -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + 
using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - ublas::static_tensor,layout_type> a2 = vector_type(1); + ublas::tensor_static,layout> a2 = vector(1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); - ublas::static_tensor,layout_type> a3 = vector_type(2); + ublas::tensor_static,layout> a3 = vector(2); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); - ublas::static_tensor,layout_type> a4 = vector_type(2); + ublas::tensor_static,layout> a4 = vector(2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); - ublas::static_tensor,layout_type> a5 = vector_type(3); + ublas::tensor_static,layout> a5 = vector(3); BOOST_CHECK_EQUAL( a5.size() , 3 ); BOOST_CHECK( !a5.empty() ); } @@ -81,96 +81,96 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) struct fixture { template - using extents_type = boost::numeric::ublas::static_extents; + using shape = boost::numeric::ublas::extents; - fixture() {} + fixture()=default; std::tuple< - extents_type<1,1>, // 0 - extents_type<2,3>, // 1 - extents_type<9,7>, // 2 - extents_type<15,17> // 3 - > extents;; + shape<1,1>, // 0 + shape<2,3>, // 1 + shape<5,8>, // 2 + shape<9,7> // 3 + > extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size()==2); - etensor_type t = matrix_type{e[0],e[1]}; - BOOST_CHECK_EQUAL ( 
t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e)==2); + etensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - if constexpr( extents_type::at(1) == 1 ){ - assert(e.size()==2); - if(e.empty()) + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + if constexpr( ublas::get_v == 1 ){ + assert(ublas::size(e)==2); + if(ublas::empty(e)) return; - etensor_type t = vector_type(product(e)); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + etensor t = vector(ublas::product(e)); + BOOST_CHECK_EQUAL ( t.size() , 
ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + auto t = etensor{}; + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto j = 0ul; j < t.size(1); ++j){ @@ -180,34 +180,34 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_tensor_vector_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; - assert(e.size() == 2); + assert(ublas::size(e) == 2); - if constexpr( extents_type::at(1) == 1 ){ - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); + if constexpr( ublas::get_v == 1 ){ + auto t = etensor{}; + auto r = vector(e[0]*e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product (e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto i = 0ul; i < t.size(); ++i){ @@ -216,32 +216,32 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - 
using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + auto t = etensor{}; + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto j = 0ul; j < t.size(1); ++j){ @@ -251,36 +251,36 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - if constexpr( extents_type::at(1) == 1 ){ - auto t = etensor_type{}; - auto r = 
vector_type(e[0]*e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; + + auto check = [](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + if constexpr( ublas::get_v == 1 ){ + auto t = etensor{}; + auto r = vector(e[0]*e[1]); std::iota(r.data().begin(),r.data().end(), 1); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); for(auto i = 0ul; i < t.size(); ++i){ @@ -289,50 +289,50 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, te } }; - for_each_tuple(extents,check); + for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - auto t = etensor_type{}; - auto r = matrix_type(e[0],e[1]); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + + 
for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + auto t = etensor{}; + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r + etensor s = r + 3*r; + etensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); BOOST_CHECK ( !q.empty() ); @@ -343,9 +343,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); } } - }; - - for_each_tuple(extents,check); + }); } @@ -353,44 +351,44 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = 
typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; - - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; - using etensor_type = ublas::static_tensor; - - assert(e.size() == 2); - if constexpr( extents_type::at(1) == 1 ){ - auto t = etensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; + + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; + using etensor = ublas::tensor_static; + + assert(ublas::size(e) == 2); + if constexpr( ublas::get_v == 1 ){ + auto t = etensor{}; + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r + 3*r; - etensor_type s = r + 3*r; - etensor_type q = s + r + 3*r + s; // + 3*r + etensor s = r + 3*r; + etensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( 
q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); BOOST_CHECK ( !q.empty() ); @@ -401,36 +399,35 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_t BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); } } - }; - - for_each_tuple(extents,check); + }); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + using vector = typename tensor::vector_type; - auto check = [](auto const&, auto& e) { - using extents_type = std::decay_t; + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using shape = std::decay_t; - if(product(e) <= 2) + if constexpr(ublas::product_v <= 2) return; - assert(e.size() == 2); - auto Q = ublas::static_tensor,layout_type>{} ; - auto A = matrix_type(e[0],e[1]); - auto b = vector_type(e[1]); - auto c = vector_type(e[0]); - std::iota(b.data().begin(),b.data().end(), 1); - std::fill(A.data().begin(),A.data().end(), 1); - std::fill(c.data().begin(),c.data().end(), 2); + assert(ublas::size_v == 2); + + auto Q = ublas::tensor_static,1>,layout>{} ; + auto A = matrix(e[0],e[1]); + auto b = vector(e[1]); + auto c = vector(e[0]); + std::iota(b.data().begin(),b.data().end(), value{1}); + std::fill(A.data().begin(),A.data().end(), value{1}); + std::fill(c.data().begin(),c.data().end(), value{2}); std::fill(Q.begin(),Q.end(), 2); 
decltype(Q) T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; @@ -442,17 +439,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); BOOST_CHECK ( !T.empty() ); - for(auto i = 0ul; i < T.size(); ++i){ - auto n = e[1]; - auto ab = n * (n+1) / 2; - BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - } - - }; + auto n = e[1]; + auto ab = value(std::div(n*(n+1),2).quot); + const auto ref = ab+4*Q(0)+2*c(0); + BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) ); +// BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - for_each_tuple(extents,check); + }); } diff --git a/test/tensor/test_strides.cpp b/test/tensor/test_strides.cpp index 25d292223..71ef94256 100644 --- a/test/tensor/test_strides.cpp +++ b/test/tensor/test_strides.cpp @@ -12,157 +12,149 @@ #include -#include -#include - -//BOOST_AUTO_TEST_SUITE(test_strides, * boost::unit_test::depends_on("test_extents")); +#include BOOST_AUTO_TEST_SUITE(test_strides) using test_types = std::tuple; +using extents = boost::numeric::ublas::extents<>; +using first_order = boost::numeric::ublas::layout::first_order; +using last_order = boost::numeric::ublas::layout::last_order; + + BOOST_AUTO_TEST_CASE_TEMPLATE( test_strides_ctor, value, test_types) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; + constexpr auto layout = value{}; + + auto s1 = ublas::to_strides(extents {1},layout); + auto s5 = ublas::to_strides(extents {5},layout); + auto s11 = ublas::to_strides(extents {1,1},layout); + auto s12 = ublas::to_strides(extents {1,2},layout); + auto s21 = ublas::to_strides(extents {2,1},layout); + auto s23 = ublas::to_strides(extents {2,3},layout); + auto s231 = ublas::to_strides(extents{2,3,1},layout); + auto s123 = ublas::to_strides(extents{1,2,3},layout); + auto s423 = ublas::to_strides(extents{4,2,3},layout); + + BOOST_CHECK (! s1.empty()); + BOOST_CHECK (! s5.empty()); + BOOST_CHECK (! 
s11.empty()); + BOOST_CHECK (! s12.empty()); + BOOST_CHECK (! s21.empty()); + BOOST_CHECK (! s23.empty()); + BOOST_CHECK (!s231.empty()); + BOOST_CHECK (!s123.empty()); + BOOST_CHECK (!s423.empty()); + + BOOST_CHECK_EQUAL ( s1.size(), 1); + BOOST_CHECK_EQUAL ( s5.size(), 1); + BOOST_CHECK_EQUAL ( s11.size(), 2); + BOOST_CHECK_EQUAL ( s12.size(), 2); + BOOST_CHECK_EQUAL ( s21.size(), 2); + BOOST_CHECK_EQUAL ( s23.size(), 2); + BOOST_CHECK_EQUAL ( s231.size(), 3); + BOOST_CHECK_EQUAL ( s123.size(), 3); + BOOST_CHECK_EQUAL ( s423.size(), 3); +} - using extents_type = ublas::basic_extents; - using strides_type = ublas::strides_t; +BOOST_AUTO_TEST_CASE( test_strides_ctor_access_first_order) +{ + namespace ublas = boost::numeric::ublas; + constexpr auto layout = first_order{}; - strides_type s0{}; - BOOST_CHECK ( s0.empty()); - BOOST_CHECK_EQUAL ( s0.size(), 0); + auto s1 = ublas::to_strides(extents {1},layout); + auto s5 = ublas::to_strides(extents {5},layout); + auto s11 = ublas::to_strides(extents {1,1},layout); + auto s12 = ublas::to_strides(extents {1,2},layout); + auto s21 = ublas::to_strides(extents {2,1},layout); + auto s23 = ublas::to_strides(extents {2,3},layout); + auto s231 = ublas::to_strides(extents{2,3,1},layout); + auto s123 = ublas::to_strides(extents{1,2,3},layout); + auto s423 = ublas::to_strides(extents{4,2,3},layout); - strides_type s1{extents_type{1,1}}; - BOOST_CHECK (!s1.empty()); - BOOST_CHECK_EQUAL ( s1.size(), 2); + BOOST_REQUIRE_EQUAL ( s11 .size(),2); + BOOST_REQUIRE_EQUAL ( s12 .size(),2); + BOOST_REQUIRE_EQUAL ( s21 .size(),2); + BOOST_REQUIRE_EQUAL ( s23 .size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); - strides_type s2{extents_type{1,2}}; - BOOST_CHECK (!s2.empty()); - BOOST_CHECK_EQUAL ( s2.size(), 2); - strides_type s3{extents_type{2,1}}; - BOOST_CHECK (!s3.empty()); - BOOST_CHECK_EQUAL ( s3.size(), 2); + BOOST_CHECK_EQUAL ( s11[0], 1); + 
BOOST_CHECK_EQUAL ( s11[1], 1); - strides_type s4{extents_type{2,3}}; - BOOST_CHECK (!s4.empty()); - BOOST_CHECK_EQUAL ( s4.size(), 2); + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); - strides_type s5{extents_type{2,3,1}}; - BOOST_CHECK (!s5.empty()); - BOOST_CHECK_EQUAL ( s5.size(), 3); + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); - strides_type s6{extents_type{1,2,3}}; - BOOST_CHECK (!s6.empty()); - BOOST_CHECK_EQUAL ( s6.size(), 3); - strides_type s7{extents_type{4,2,3}}; - BOOST_CHECK (!s7.empty()); - BOOST_CHECK_EQUAL ( s7.size(), 3); -} + BOOST_CHECK_EQUAL ( s23[0], 1); + BOOST_CHECK_EQUAL ( s23[1], 2); -BOOST_AUTO_TEST_CASE( test_strides_ctor_access_first_order) -{ - using namespace boost::numeric; - - using extents_type = ublas::basic_extents; - using strides_type = ublas::strides_t; - - strides_type s1{extents_type{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - strides_type s2{extents_type{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - strides_type s3{extents_type{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - strides_type s4{extents_type{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 1); - BOOST_CHECK_EQUAL ( s4[1], 2); - - strides_type s5{extents_type{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 1); - BOOST_CHECK_EQUAL ( s5[1], 2); - BOOST_CHECK_EQUAL ( s5[2], 6); - - strides_type s6{extents_type{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 1); - BOOST_CHECK_EQUAL ( s6[1], 1); - BOOST_CHECK_EQUAL ( s6[2], 2); - - strides_type s7{extents_type{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 1); - BOOST_CHECK_EQUAL ( s7[1], 2); - BOOST_CHECK_EQUAL ( s7[2], 2); - - strides_type 
s8{extents_type{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 1); - BOOST_CHECK_EQUAL ( s8[1], 4); - BOOST_CHECK_EQUAL ( s8[2], 8); + BOOST_CHECK_EQUAL ( s231[0], 1); + BOOST_CHECK_EQUAL ( s231[1], 2); + BOOST_CHECK_EQUAL ( s231[2], 6); + + BOOST_CHECK_EQUAL ( s123[0], 1); + BOOST_CHECK_EQUAL ( s123[1], 1); + BOOST_CHECK_EQUAL ( s123[2], 2); + + BOOST_CHECK_EQUAL ( s423[0], 1); + BOOST_CHECK_EQUAL ( s423[1], 4); + BOOST_CHECK_EQUAL ( s423[2], 8); } BOOST_AUTO_TEST_CASE( test_strides_ctor_access_last_order) { - using namespace boost::numeric; - - using extents_type = ublas::basic_extents; - using strides_type = ublas::strides_t; - - strides_type s1{extents_type{1,1}}; - BOOST_REQUIRE_EQUAL( s1.size(),2); - BOOST_CHECK_EQUAL ( s1[0], 1); - BOOST_CHECK_EQUAL ( s1[1], 1); - - strides_type s2{extents_type{1,2}}; - BOOST_REQUIRE_EQUAL ( s2.size(),2); - BOOST_CHECK_EQUAL ( s2[0], 1); - BOOST_CHECK_EQUAL ( s2[1], 1); - - strides_type s3{extents_type{2,1}}; - BOOST_REQUIRE_EQUAL ( s3.size(),2); - BOOST_CHECK_EQUAL ( s3[0], 1); - BOOST_CHECK_EQUAL ( s3[1], 1); - - strides_type s4{extents_type{2,3}}; - BOOST_REQUIRE_EQUAL ( s4.size(),2); - BOOST_CHECK_EQUAL ( s4[0], 3); - BOOST_CHECK_EQUAL ( s4[1], 1); - - strides_type s5{extents_type{2,3,1}}; - BOOST_REQUIRE_EQUAL ( s5.size(),3); - BOOST_CHECK_EQUAL ( s5[0], 3); - BOOST_CHECK_EQUAL ( s5[1], 1); - BOOST_CHECK_EQUAL ( s5[2], 1); - - strides_type s6{extents_type{1,2,3}}; - BOOST_REQUIRE_EQUAL ( s6.size(),3); - BOOST_CHECK_EQUAL ( s6[0], 6); - BOOST_CHECK_EQUAL ( s6[1], 3); - BOOST_CHECK_EQUAL ( s6[2], 1); - - strides_type s7{extents_type{2,1,3}}; - BOOST_REQUIRE_EQUAL ( s7.size(),3); - BOOST_CHECK_EQUAL ( s7[0], 3); - BOOST_CHECK_EQUAL ( s7[1], 3); - BOOST_CHECK_EQUAL ( s7[2], 1); - - strides_type s8{extents_type{4,2,3}}; - BOOST_REQUIRE_EQUAL ( s8.size(),3); - BOOST_CHECK_EQUAL ( s8[0], 6); - BOOST_CHECK_EQUAL ( s8[1], 3); - BOOST_CHECK_EQUAL ( s8[2], 1); + namespace ublas = boost::numeric::ublas; 
+ constexpr auto layout = last_order{}; + + auto s1 = ublas::to_strides(extents {1},layout); + auto s5 = ublas::to_strides(extents {5},layout); + auto s11 = ublas::to_strides(extents {1,1},layout); + auto s12 = ublas::to_strides(extents {1,2},layout); + auto s21 = ublas::to_strides(extents {2,1},layout); + auto s23 = ublas::to_strides(extents {2,3},layout); + auto s231 = ublas::to_strides(extents{2,3,1},layout); + auto s123 = ublas::to_strides(extents{1,2,3},layout); + auto s423 = ublas::to_strides(extents{4,2,3},layout); + + BOOST_REQUIRE_EQUAL ( s11 .size(),2); + BOOST_REQUIRE_EQUAL ( s12 .size(),2); + BOOST_REQUIRE_EQUAL ( s21 .size(),2); + BOOST_REQUIRE_EQUAL ( s23 .size(),2); + BOOST_REQUIRE_EQUAL ( s231.size(),3); + BOOST_REQUIRE_EQUAL ( s123.size(),3); + BOOST_REQUIRE_EQUAL ( s423.size(),3); + + BOOST_CHECK_EQUAL ( s11[0], 1); + BOOST_CHECK_EQUAL ( s11[1], 1); + + BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[1], 1); + + BOOST_CHECK_EQUAL ( s21[0], 1); + BOOST_CHECK_EQUAL ( s21[1], 1); + + BOOST_CHECK_EQUAL ( s23[0], 3); + BOOST_CHECK_EQUAL ( s23[1], 1); + + BOOST_CHECK_EQUAL ( s231[0], 3); + BOOST_CHECK_EQUAL ( s231[1], 1); + BOOST_CHECK_EQUAL ( s231[2], 1); + + BOOST_CHECK_EQUAL ( s123[0], 6); + BOOST_CHECK_EQUAL ( s123[1], 3); + BOOST_CHECK_EQUAL ( s123[2], 1); + + BOOST_CHECK_EQUAL ( s423[0], 6); + BOOST_CHECK_EQUAL ( s423[1], 3); + BOOST_CHECK_EQUAL ( s423[2], 1); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_tensor.cpp b/test/tensor/test_tensor.cpp index 4ded6b1a0..ce16c8916 100644 --- a/test/tensor/test_tensor.cpp +++ b/test/tensor/test_tensor.cpp @@ -1,6 +1,6 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -18,14 +18,13 @@ #ifndef BOOST_TEST_DYN_LINK #define BOOST_TEST_DYN_LINK #endif - +// NOLINTNEXTLINE #define BOOST_TEST_MODULE Tensor #include #include "utility.hpp" -// BOOST_AUTO_TEST_SUITE ( test_tensor, * boost::unit_test::depends_on("test_extents") ) ; BOOST_AUTO_TEST_SUITE ( test_tensor ) using test_types = zip>::with_t; @@ -33,15 +32,15 @@ using test_types = zip>::with_t; + using tensor_type = ublas::tensor_dynamic; - auto a1 = tensor_type{}; - BOOST_CHECK_EQUAL( a1.size() , 0ul ); - BOOST_CHECK( a1.empty() ); - BOOST_CHECK_EQUAL( a1.data() , nullptr); +// auto a1 = tensor_type{}; +// BOOST_CHECK_EQUAL( a1.size() , 0ul ); +// BOOST_CHECK( a1.empty() ); +// BOOST_CHECK_EQUAL( a1.data() , nullptr); auto a2 = tensor_type{1,1}; BOOST_CHECK_EQUAL( a2.size() , 1 ); @@ -82,7 +81,6 @@ struct fixture using extents_type = boost::numeric::ublas::extents<>; fixture() : extents { - extents_type{}, // 0 extents_type{1,1}, // 1 extents_type{1,2}, // 2 extents_type{2,1}, // 3 @@ -100,16 +98,16 @@ struct fixture BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) { auto t = tensor_type{e}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - if(e.empty()) { + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size(e) ); + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); BOOST_CHECK_EQUAL ( t.data() , nullptr); } @@ -126,10 +124,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents, value, test_types, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fixture ) { - using 
namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) { @@ -140,7 +138,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_CHECK ( t.strides() == r.strides() ); BOOST_CHECK ( t.extents() == r.extents() ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); BOOST_CHECK_EQUAL ( t.data() , nullptr); } @@ -160,12 +158,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor, value, test_types, fix BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; using other_layout_type = std::conditional_t::value, ublas::layout::last_order, ublas::layout::first_order>; - using other_tensor_type = ublas::dynamic_tensor; + using other_tensor_type = ublas::tensor_dynamic; for(auto const& e : extents) @@ -191,19 +189,19 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_typ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check = [](auto const& e) { auto r = tensor_type{e}; auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( 
t.rank() , ublas::size (e) ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( t.empty() ); BOOST_CHECK_EQUAL ( t.data() , nullptr); } @@ -221,10 +219,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; std::random_device device{}; std::minstd_rand0 generator(device()); @@ -244,14 +242,14 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using array_type = typename tensor_type::array_type; + using tensor_type = ublas::tensor_dynamic; + using container_type = typename tensor_type::container_type; for(auto const& e : extents) { - auto a = array_type(product(e)); + auto a = container_type(product(e)); auto v = value_type {}; for(auto& aa : a){ @@ -270,10 +268,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& e : extents) { auto t = tensor_type{e}; @@ -292,10 +290,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_tensor_read_write_single_index_access, va BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; auto check1 = [](const tensor_type& t) { auto v = value_type{}; @@ -307,7 +305,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check2 = [](const tensor_type& t) { - std::array k; + std::array k = {0,0}; auto r = std::is_same::value ? 1 : 0; auto q = std::is_same::value ? 1 : 0; auto v = value_type{}; @@ -321,7 +319,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check3 = [](const tensor_type& t) { - std::array k; + std::array k = {0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 2 : 0; auto o = op_type{}; @@ -338,7 +336,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, auto check4 = [](const tensor_type& t) { - std::array k; + std::array k = {0,0,0,0}; using op_type = std::conditional_t, std::minus<>, std::plus<>>; auto r = std::is_same_v ? 
3 : 0; auto o = op_type{}; @@ -379,10 +377,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& efrom : extents){ for(auto const& eto : extents){ @@ -393,17 +391,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu for(auto i = 0ul; i < t.size(); ++i) BOOST_CHECK_EQUAL( t[i], v ); - t.reshape(eto); - for(auto i = 0ul; i < std::min(product(efrom),product(eto)); ++i) - BOOST_CHECK_EQUAL( t[i], v ); + auto r = reshape(t,eto); + for(auto i = 0ul; i < std::min(ublas::product(efrom),ublas::product(eto)); ++i) + BOOST_CHECK_EQUAL( r[i], v ); - BOOST_CHECK_EQUAL ( t.size() , product(eto) ); - BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); - BOOST_CHECK ( t.extents() == eto ); + BOOST_CHECK_EQUAL ( r.size() , ublas::product(eto) ); + BOOST_CHECK_EQUAL ( r.rank() , ublas::size (eto) ); + BOOST_CHECK ( r.extents() == eto ); if(efrom != eto){ for(auto i = product(efrom); i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], value_type{} ); + BOOST_CHECK_EQUAL( r[i], value_type{} ); } } } @@ -414,10 +412,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& e_t : extents){ for(auto const& e_r : extents) { @@ -431,15 +429,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, 
value, test_types, fixture) for(auto i = 0ul; i < t.size(); ++i) BOOST_CHECK_EQUAL( t[i], w ); - BOOST_CHECK_EQUAL ( t.size() , product(e_r) ); - BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e_r) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e_r) ); BOOST_CHECK ( t.extents() == e_r ); for(auto i = 0ul; i < r.size(); ++i) BOOST_CHECK_EQUAL( r[i], v ); - BOOST_CHECK_EQUAL ( r.size() , product(e_t) ); - BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); + BOOST_CHECK_EQUAL ( r.size() , ublas::product(e_t) ); + BOOST_CHECK_EQUAL ( r.rank() , ublas::size (e_t) ); BOOST_CHECK ( r.extents() == e_t ); @@ -451,10 +449,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; for(auto const& e : extents) { @@ -467,7 +465,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - if(t.size() > 0) { + if(!t.empty()) { BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; } @@ -476,17 +474,17 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_throw, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; + using tensor_type = ublas::tensor_dynamic; - std::vector 
vec(30); - BOOST_CHECK_THROW(tensor_type({5,5},vec), std::runtime_error); + std::vector vec(2); + BOOST_CHECK_THROW(tensor_type({5,5},vec), std::invalid_argument); auto t = tensor_type{{5,5}}; auto i = ublas::index::index_type<4>{}; - BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::runtime_error); + BOOST_CHECK_THROW((void)t.operator()(i,i,i), std::invalid_argument); } diff --git a/test/tensor/test_tensor_matrix_vector.cpp b/test/tensor/test_tensor_matrix_vector.cpp index 7672bb31a..bea8566a1 100644 --- a/test/tensor/test_tensor_matrix_vector.cpp +++ b/test/tensor/test_tensor_matrix_vector.cpp @@ -19,67 +19,65 @@ #include "utility.hpp" -// BOOST_AUTO_TEST_SUITE ( test_tensor_matrix_interoperability, * boost::unit_test::depends_on("test_tensor") ) ; - BOOST_AUTO_TEST_SUITE ( test_tensor_matrix_interoperability ) using test_types = zip::with_t; -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; - tensor_type a2 = matrix_type(1,1); + tensor a2 = matrix(1,1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - tensor_type a3 = matrix_type(2,1); + tensor a3 = matrix(2,1); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - tensor_type a4 = matrix_type(1,2); + tensor a4 = matrix(1,2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - tensor_type a5 = 
matrix_type(2,3); + tensor a5 = matrix(2,3); BOOST_CHECK_EQUAL( a5.size() , 6 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); } -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, pair, test_types) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - tensor_type a2 = vector_type(1); + tensor a2 = vector(1); BOOST_CHECK_EQUAL( a2.size() , 1 ); BOOST_CHECK( !a2.empty() ); BOOST_CHECK_NE( a2.data() , nullptr); - tensor_type a3 = vector_type(2); + tensor a3 = vector(2); BOOST_CHECK_EQUAL( a3.size() , 2 ); BOOST_CHECK( !a3.empty() ); BOOST_CHECK_NE( a3.data() , nullptr); - tensor_type a4 = vector_type(2); + tensor a4 = vector(2); BOOST_CHECK_EQUAL( a4.size() , 2 ); BOOST_CHECK( !a4.empty() ); BOOST_CHECK_NE( a4.data() , nullptr); - tensor_type a5 = vector_type(3); + tensor a5 = vector(3); BOOST_CHECK_EQUAL( a5.size() , 3 ); BOOST_CHECK( !a5.empty() ); BOOST_CHECK_NE( a5.data() , nullptr); @@ -88,34 +86,35 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) struct fixture { - using extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents{ - extents_type{1,1}, // 1 - extents_type{2,3}, // 2 - extents_type{9,11}, // 2 - extents_type{15,17}} // 3 - { - } - std::vector extents; + using extents_type = boost::numeric::ublas::extents<>; + fixture() + : extents{ + extents_type{1,1}, // 1 + extents_type{2,3}, // 2 + extents_type{5,6}, // 3 + extents_type{9,7}} // 4 + { + } + + std::vector extents; }; 
-BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; auto check = [](auto const& e) { - assert(e.size()==2); - tensor_type t = matrix_type{e[0],e[1]}; - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + assert(ublas::size(e)==2); + tensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); }; @@ -125,22 +124,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size()==2); - if(e.empty()) + assert(ublas::size(e)==2); + if(ublas::empty(e)) return; - tensor_type t = 
vector_type(product(e)); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + tensor t = vector(ublas::product(e)); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); }; @@ -151,26 +150,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = matrix_type(e[0],e[1]); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = matrix(e[0],e[1]); std::iota(r.data().begin(),r.data().end(), 1); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -186,26 +186,26 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, te } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, pair, 
test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r; BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -218,27 +218,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, te check(e); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = 
tensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -256,27 +256,27 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, te -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); auto q = r; t = std::move(r); BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( 
t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); @@ -293,43 +293,43 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, te -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r + 3*r; - tensor_type s = r + 3*r; - tensor_type q = s + r + 3*r + s; // + 3*r + tensor s = r + 3*r; + tensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); BOOST_CHECK ( !s.empty() ); 
BOOST_CHECK_NE ( s.data() , nullptr); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); BOOST_CHECK ( !q.empty() ); BOOST_CHECK_NE ( q.data() , nullptr); @@ -352,43 +352,43 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { - assert(e.size() == 2); - auto t = tensor_type{}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); + assert(ublas::size(e) == 2); + auto t = tensor(e); + auto r = vector(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), value{1}); t = r + 3*r; - tensor_type s = r + 3*r; - tensor_type q = s + r + 3*r + s; // + 3*r + tensor s = r + 3*r; + tensor q = s + r + 3*r + s; // + 3*r BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size(e) ); BOOST_CHECK ( !t.empty() ); BOOST_CHECK_NE ( t.data() , nullptr); BOOST_CHECK_EQUAL ( 
s.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , e.size() ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size(e) ); BOOST_CHECK ( !s.empty() ); BOOST_CHECK_NE ( s.data() , nullptr); BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , e.size() ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size(e) ); BOOST_CHECK ( !q.empty() ); BOOST_CHECK_NE ( q.data() , nullptr); @@ -407,30 +407,31 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) { - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::dynamic_tensor; - using matrix_type = typename tensor_type::matrix_type; - using vector_type = typename tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; + using vector = typename tensor::vector_type; auto check = [](auto const& e) { if(product(e) <= 2) return; - assert(e.size() == 2); - auto Q = tensor_type{e[0],1}; - auto A = matrix_type(e[0],e[1]); - auto b = vector_type(e[1]); - auto c = vector_type(e[0]); - std::iota(b.data().begin(),b.data().end(), 1); - std::fill(A.data().begin(),A.data().end(), 1); - std::fill(c.data().begin(),c.data().end(), 2); - std::fill(Q.begin(),Q.end(), 2); - - tensor_type T = Q + 
(ublas::prod(A , b) + 2*c) + 3*Q; + + assert(ublas::size(e) == 2); + auto Q = tensor{e[0],1}; + auto A = matrix(e[0],e[1]); + auto b = vector(e[1]); + auto c = vector(e[0]); + std::iota(b.data().begin(),b.data().end(), value{1}); + std::fill(A.data().begin(),A.data().end(), value{1}); + std::fill(c.data().begin(),c.data().end(), value{2}); + std::fill(Q.begin(),Q.end(), value{2}); + + tensor T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); @@ -440,11 +441,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, value, BOOST_CHECK ( !T.empty() ); BOOST_CHECK_NE ( T.data() , nullptr); - for(auto i = 0ul; i < T.size(); ++i){ - auto n = e[1]; - auto ab = n * (n+1) / 2; - BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); - } + const auto n = e[1]; + const auto ab = value(std::div(n*(n+1),2).quot); + const auto ref = ab+4*Q(0)+2*c(0); + BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) ); + +// BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); }; diff --git a/test/tensor/utility.hpp b/test/tensor/utility.hpp index 6dc5f1a45..93571be2a 100644 --- a/test/tensor/utility.hpp +++ b/test/tensor/utility.hpp @@ -14,6 +14,8 @@ #define _BOOST_UBLAS_TEST_TENSOR_UTILITY_ #include +#include +#include template struct zip_helper; @@ -48,55 +50,38 @@ struct zip_helper, type1, types1...> template using zip = zip_helper,types...>; -template -struct for_each_tuple_impl{ - static_assert(sizeof...(Ts) > I, "Static Assert in boost::numeric::ublas::detail::for_each_tuple"); - auto operator()(std::tuple& t, CallBack call_back) - { - call_back(I,std::get(t)); - if constexpr(sizeof...(Ts) - 1 > I){ - for_each_tuple_impl it; - it(t,call_back); - } - } -}; - -template -auto for_each_tuple(std::tuple& t, CallBack call_back){ - if constexpr (std::tuple_size_v> == 0u ) - return; - for_each_tuple_impl<0,CallBack,Ts...> f; - f(t,call_back); 
+template +void for_each_in_tuple(std::tuple const& tuple, UnaryOp&& op) +{ + auto invoke_op_for_tuple = [&](std::index_sequence) { + (..., std::invoke(op, Is, std::get(tuple))); + }; + invoke_op_for_tuple(std::make_index_sequence>>{}); } +namespace boost::numeric::ublas +{ -template -struct list{ - static constexpr size_t size = sizeof...(Ts); -}; +template +void for_each_in_index(std::index_sequence, TA const& a, TB const& b, UnaryOp&& op) +{ + (..., std::invoke(op,a,b,std::index_sequence{}) ); +} -template -struct for_each_list_impl{ - constexpr decltype(auto) operator()(list l, CallBack call_back){ - using new_list = list; - using value_type = T; - call_back(I,value_type{}); - - if constexpr(new_list::size != 0){ - for_each_list_impl it; - it(new_list{},call_back); - } - } -}; +}// namespace boost::numeric::ublas +//template +//void for_each_in_tuple(std::index_sequence, UnaryOp&& op) +//{ +// auto invoke_op_for_tuple = [&](std::index_sequence) { +// (..., std::invoke(op, Is, Is)); +// }; + +// invoke_op_for_tuple(std::make_index_sequence::size()>{}); +//} -template -auto for_each_list(list l, CallBack call_back){ - for_each_list_impl<0,CallBack,Ts...> f; - f(l,call_back); -} #include From a58a3621e4ecc2590e166dfd036ac82f97a758ce Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 24 May 2021 21:57:50 +0200 Subject: [PATCH 02/40] rebasing subtensor. 
--- IDEs/qtcreator/include/tensor/tensor.pri | 3 + IDEs/qtcreator/test/test_tensor.pro | 13 +- IDEs/qtcreator/tests.pri | 2 +- .../tensor/slice_detail/type_traits_slice.hpp | 50 ++ include/boost/numeric/ublas/tensor/span.hpp | 251 ++++++ .../boost/numeric/ublas/tensor/strides.hpp | 99 +++ .../boost/numeric/ublas/tensor/subtensor.hpp | 742 ++++++++++++++++++ .../ublas/tensor/subtensor_utility.hpp | 210 +++++ include/boost/numeric/ublas/tensor/tags.hpp | 16 +- include/boost/numeric/ublas/tensor/tensor.hpp | 1 + test/tensor/test_algorithms.cpp | 2 +- test/tensor/test_span.cpp | 258 ++++++ test/tensor/test_subtensor.cpp | 575 ++++++++++++++ test/tensor/test_subtensor_utility.cpp | 394 ++++++++++ 14 files changed, 2604 insertions(+), 12 deletions(-) create mode 100644 include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp create mode 100644 include/boost/numeric/ublas/tensor/span.hpp create mode 100644 include/boost/numeric/ublas/tensor/strides.hpp create mode 100644 include/boost/numeric/ublas/tensor/subtensor.hpp create mode 100644 include/boost/numeric/ublas/tensor/subtensor_utility.hpp create mode 100644 test/tensor/test_span.cpp create mode 100644 test/tensor/test_subtensor.cpp create mode 100644 test/tensor/test_subtensor_utility.cpp diff --git a/IDEs/qtcreator/include/tensor/tensor.pri b/IDEs/qtcreator/include/tensor/tensor.pri index 112376c11..91cd18b96 100644 --- a/IDEs/qtcreator/include/tensor/tensor.pri +++ b/IDEs/qtcreator/include/tensor/tensor.pri @@ -13,6 +13,9 @@ HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_arithmetic.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/operators_comparison.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/ostream.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/span.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/subtensor.hpp \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/subtensor_utility.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tags.hpp \ 
$${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/concepts.hpp \ diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 8deee2f99..856775ee4 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -18,18 +18,10 @@ clang: QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp=libiomp5 gcc:QMAKE_CXXFLAGS_DEBUG = -g clang: QMAKE_CXXFLAGS_DEBUG =-g - #QMAKE_CXXFLAGS += --coverage BOOST_ROOT=../../../../../.. -#exists( $$BOOST_ROOT/boost-build.jam ) { -# message("Boost installed.") -# INCLUDEPATH += $${BOOST_ROOT}/libs/numeric/ublas/include -# LIBS += -L$${BOOST_ROOT}/stage/lib -lgomp -# QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib -#} - QMAKE_RPATHDIR += $${BOOST_ROOT}/stage/lib INCLUDEPATH+=$$BOOST_ROOT/libs/numeric/ublas/include LIBS+=-L$${BOOST_ROOT}/stage/lib -lboost_unit_test_framework -lgomp @@ -76,4 +68,7 @@ SOURCES += \ $${TEST_DIR}/test_strides.cpp \ $${TEST_DIR}/test_tensor.cpp \ $${TEST_DIR}/test_tensor_matrix_vector.cpp \ - $${TEST_DIR}/test_extents_functions.cpp + $${TEST_DIR}/test_extents_functions.cpp \ + $${TEST_DIR}/test_span.cpp \ + $${TEST_DIR}/test_subtensor.cpp \ + $${TEST_DIR}/test_subtensor_utility.cpp diff --git a/IDEs/qtcreator/tests.pri b/IDEs/qtcreator/tests.pri index 04e131f59..33721d551 100644 --- a/IDEs/qtcreator/tests.pri +++ b/IDEs/qtcreator/tests.pri @@ -33,7 +33,7 @@ SUBDIRS += \ # test_triangular \ # triangular_access \ # triangular_layout \ - # test_tensor +# test_tensor #begin_end.file = test/begin_end.pro #comp_mat_erase.file = test/comp_mat_erase.pro diff --git a/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp b/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp new file mode 100644 index 000000000..54f39b792 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp @@ -0,0 +1,50 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, 
cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google +// + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ + +#include +#include + +namespace boost::numeric::ublas::experimental { + + template + struct basic_slice; + + template + struct is_slice : std::false_type{}; + + template + inline static constexpr auto const is_slice_v = is_slice::value; + +} // namespace boost::numeric::ublas::span + +namespace boost::numeric::ublas::experimental { + + template + struct is_slice< basic_slice > : std::true_type{}; + +} // namespace boost::numeric::ublas::span + +namespace boost::numeric::ublas{ + + template + struct is_dynamic< experimental::basic_slice > : std::true_type{}; + + template + struct is_static< experimental::basic_slice > : std::true_type{}; + +} // namespace boost::numeric::ublas + + +#endif diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp new file mode 100644 index 000000000..f0875b60c --- /dev/null +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -0,0 +1,251 @@ +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. 
+// + + +#ifndef _BOOST_UBLAS_TENSOR_SPAN_ +#define _BOOST_UBLAS_TENSOR_SPAN_ + +#include +#include +#include +#include + +namespace boost { +namespace numeric { +namespace ublas { +namespace tag { + +struct sliced {}; +struct strided {}; + +} +} +} +} + + +namespace boost::numeric::ublas { + +/** \class span + * \ingroup Core_Module + * + * \brief Selection operator class to initialize stl::multi_subarray + * + * This class is used to generate stl::multi_subarray from stl::multi_array and to + * work on views. + * \note zero based indexing is used. + * + */ + + + +//using offsets = std::vector; + +template +class span; + + +static constexpr inline std::size_t end = std::numeric_limits::max(); + +template<> +class span +{ +public: + using span_tag = tag::strided; + using value_type = std::size_t; + + // covers the complete range of one dimension + // e.g. a(:) + constexpr explicit span() + : first_{} + , last_ {} + , step_ {} + , size_ {} + {} + + + // covers a linear range of one dimension + // e.g. a(1:3:n) + span(value_type f, value_type s, value_type l) + : first_(f) + , last_ (l) + , step_ (s) + { + if(f == l){ + last_ = l; + size_ = value_type(1); + } + else { + if(s == 0 && f != l) + throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); + + if(f > l) + throw std::runtime_error("Error in span::span: last_ is smaller than first"); + + last_ = l - ((l-f)%s); + size_ = (last_-first_)/s+value_type(1); + } + } + + // covers only one index of one dimension + // e.g. 
a(1) or a(end) + span(value_type n) + : span(n,1,n) + { + } + + span(span const& other) + : first_(other.first_) + , last_ (other.last_ ) + , step_ (other.step_ ) + , size_ (other.size_ ) + { + } + + span& operator=(span const& other) + { + first_ = other.first_; + last_ = other.last_ ; + step_ = other.step_ ; + size_ = other.size_ ; + return *this; + } + + inline auto first() const {return first_; } + inline auto last () const {return last_ ; } + inline auto step () const {return step_ ; } + inline auto size () const {return size_ ; } + + ~span() = default; + + inline value_type operator[] (std::size_t idx) const + { + return first_ + idx * step_; + } + + inline span operator()(const span &rhs) const + { + auto const& lhs = *this; + return span( + rhs.first_*lhs.step_ + lhs.first_, + lhs.step_ *rhs.step_, + rhs.last_ *lhs.step_ + lhs.first_ ); + } + +protected: + + value_type first_, last_ , step_, size_; +}; + +using strided_span = span; + +} // namespace + + +///////////// + +namespace boost::numeric::ublas { + +template<> +class span : + private span +{ + using super_type = span; +public: + using span_tag = tag::sliced; + using value_type = typename super_type::value_type; + constexpr explicit span() + : super_type() + { + } + + span(value_type f, value_type l) + : super_type(f, value_type(1), l ) + { + } + + span(value_type n) + : super_type(n) + { + } + + span(span const& other) + : super_type(other) + { + } + + inline span& operator=(const span &other) + { + super_type::operator=(other); + return *this; + } + + ~span() = default; + + inline value_type operator[] (std::size_t idx) const + { + return super_type::operator [](idx); + } + + inline auto first() const {return super_type::first(); } + inline auto last () const {return super_type::last (); } + inline auto step () const {return super_type::step (); } + inline auto size () const {return super_type::size (); } + + inline span operator()(const span &rhs) const + { + auto const& lhs = *this; + return 
span( rhs.first_ + lhs.first_, rhs.last_ + lhs.first_ ); + } +}; + +using sliced_span = span; + + +template +inline auto ran(unsigned_type f, unsigned_type l) +{ + return sliced_span(f,l); +} + +template +inline auto ran(unsigned_type f, unsigned_type s, unsigned_type l) +{ + return strided_span(f,s,l); +} + +} // namespace + + +template +std::ostream& operator<< (std::ostream& out, boost::numeric::ublas::span const& s) +{ + return out << "[" << s.first() << ":" << s.step() << ":" << s.last() << "]" << std::endl; +} + +template +inline bool operator==( + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) +{ + return lhs.first() == rhs.first() && lhs.last() == rhs.last() && lhs.step() == rhs.step(); +} + + +template +inline bool operator!=( + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) +{ + return lhs.first() != rhs.first() || lhs.last() != rhs.last() || lhs.step() != rhs.step(); +} + +#endif // FHG_range_H diff --git a/include/boost/numeric/ublas/tensor/strides.hpp b/include/boost/numeric/ublas/tensor/strides.hpp new file mode 100644 index 000000000..0dac93bb7 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/strides.hpp @@ -0,0 +1,99 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google +// +/// \file strides.hpp Definition for the basic_strides template class + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_STRIDES_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_STRIDES_HPP_ + +#include +#include +#include + +namespace boost::numeric::ublas{ + + template && is_strides_v + , int> = 0 + > + [[nodiscard]] inline + constexpr bool operator==(LStrides const& lhs, RStrides const& rhs) noexcept{ + static_assert( std::is_same_v, + "boost::numeric::ublas::operator==(LStrides,RStrides) : LHS value type should be same as RHS value type"); + + return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); + } + + template && is_strides_v + , int> = 0 + > + [[nodiscard]] inline + constexpr bool operator!=(LStrides const& lhs, RStrides const& rhs) noexcept{ + static_assert( std::is_same_v, + "boost::numeric::ublas::operator!=(LStrides,RStrides) : LHS value type should be same as RHS value type"); + return !( lhs == rhs ); + } + +} // namespace boost::numeric::ublas + + +namespace boost::numeric::ublas::detail { + + /** @brief Returns relative memory index with respect to a multi-index + * + * @code auto j = access(std::vector{3,4,5}, strides{shape{4,2,3},first_order}); @endcode + * + * @param[in] i multi-index of length p + * @param[in] w stride vector of length p + * @returns relative memory location depending on \c i and \c w + */ + template + [[nodiscard]] inline + constexpr auto access(std::vector const& i, Stride const& w) + { + static_assert( is_strides_v, + "boost::numeric::ublas::detail::access() : invalid type, type should be a strides"); + + const auto p = i.size(); + size_type sum = 0u; + for(auto r = 0u; r < p; ++r) + sum += i[r]*w[r]; + return sum; + } + + /** @brief Returns relative memory index with respect to a multi-index + * + * @code auto j = access(0, 
strides{shape{4,2,3},first_order}, 2,3,4); @endcode + * + * @param[in] i first element of the partial multi-index + * @param[in] is the following elements of the partial multi-index + * @param[in] sum the current relative memory index + * @returns relative memory location depending on \c i and \c w + */ + template + [[nodiscard]] + constexpr auto access(std::size_t sum, Stride const& w, std::size_t i, size_types ... is) + { + static_assert( is_strides_v, + "boost::numeric::ublas::detail::access() : invalid type, type should be a strides"); + sum += i*w[r]; + if constexpr (sizeof...(is) == 0) + return sum; + else + return detail::access(sum,w,std::forward(is)...); + } + +} // namespace boost::numeric::ublas::detail + +#endif diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp new file mode 100644 index 000000000..d03130610 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -0,0 +1,742 @@ +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which firsted as a Google Summer of Code project. +// + + +/// \file subtensor.hpp Definition for the tensor template class + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ + + + + +#include +#include +#include +#include +#include + + +namespace boost::numeric::ublas { + +template +class dynamic_tensor; + +template +class matrix; + +template +class vector; + + + + + +/** @brief A view of a dense tensor of values of type \c T. + * + * @tparam T type of the objects stored in the tensor (like int, double, complex,...) + * @tparam F + * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. 
\c and \c std::vector can also be used +*/ +template +class subtensor; + + +/** @brief A sliced view of a dense tensor of values of type \c T. + * + * For a \f$n\f$-dimensional tensor \f$v\f$ and \f$0\leq i < n\f$ every element \f$v_i\f$ is mapped + * to the \f$i\f$-th element of the container. A storage type \c A can be specified which defaults to \c unbounded_array. + * Elements are constructed by \c A, which need not initialise their value. + * + * @tparam T type of the objects stored in the tensor (like int, double, complex,...) + * @tparam F type of the layout which can be either + * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. \c and \c std::vector can also be used + */ +template +class subtensor > + : public detail::tensor_expression< + subtensor> , + subtensor> > +{ + + static_assert( std::is_same::value || std::is_same::value, + "boost::numeric::tensor template class only supports first- or last-order storage formats."); + + using tensor_type = dynamic_tensor; + using self_type = subtensor; +public: + + using domain_tag = tag::sliced; + + using span_type = span; + + template + using tensor_expression_type = detail::tensor_expression; + + template + using matrix_expression_type = matrix_expression; + + template + using vector_expression_type = vector_expression; + + using super_type = tensor_expression_type; + +// static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); + + using array_type = typename tensor_type::array_type; + using layout_type = typename tensor_type::layout_type; + + using size_type = typename tensor_type::size_type; + using difference_type = typename tensor_type::difference_type; + using value_type = typename tensor_type::value_type; + + using reference = typename tensor_type::reference; + using const_reference = typename tensor_type::const_reference; + + using pointer = typename tensor_type::pointer; + using const_pointer = typename tensor_type::const_pointer; 
+ +// using iterator = typename array_type::iterator; +// using const_iterator = typename array_type::const_iterator; + +// using reverse_iterator = typename array_type::reverse_iterator; +// using const_reverse_iterator = typename array_type::const_reverse_iterator; + + using tensor_temporary_type = self_type; + using storage_category = dense_tag; + + using strides_type = basic_strides; + using extents_type = basic_extents; + + using matrix_type = matrix; + using vector_type = vector; + + + + /** @brief Deleted constructor of a subtensor */ + subtensor () = delete; + + /** @brief Constructs a tensor view from a tensor without any range. + * + */ + BOOST_UBLAS_INLINE + subtensor (tensor_type& t) + : super_type () + , spans_ () + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_ (t.strides()) + , data_ (t.data()) + { + } + + template + subtensor(tensor_type& t, span_types&& ... spans) + : super_type () + , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) + , extents_ (detail::compute_extents(spans_)) + , strides_ (extents_) + , span_strides_ (detail::compute_span_strides(t.strides(),spans_)) + , data_ {t.data() + detail::compute_offset(t.strides(), spans_)} + { +// if( m == nullptr) +// throw std::length_error("Error in tensor_view::tensor_view : multi_array_type is nullptr."); +// if( t == nullptr) +// throw std::length_error("Error in tensor_view::tensor_view : tensor_type is nullptr."); + } + + + /** @brief Constructs a tensor view from a tensor without any range. 
+ * + * @note can be regarded as a pointer to a tensor + */ + explicit + subtensor (tensor_type const& t) + : super_type () + , spans_() + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_(t.strides()) + , data_ (t.data()) + { + } + +#if 0 + /** @brief Constructs a tensor with a \c shape and initiates it with one-dimensional data + * + * @code tensor A{extents{4,2,3}, array }; @endcode + * + * + * @param s initial tensor dimension extents + * @param a container of \c array_type that is copied according to the storage layout + */ + BOOST_UBLAS_INLINE + tensor (extents_type const& s, const array_type &a) + : tensor_expression_type() //tensor_container() + , extents_ (s) + , strides_ (extents_) + , data_ (a) + { + if(this->extents_.product() != this->data_.size()) + throw std::runtime_error("Error in boost::numeric::ublas::tensor: size of provided data and specified extents do not match."); + } + + + + /** @brief Constructs a tensor using a shape tuple and initiates it with a value. + * + * @code tensor A{extents{4,2,3}, 1 }; @endcode + * + * @param e initial tensor dimension extents + * @param i initial value of all elements of type \c value_type + */ + BOOST_UBLAS_INLINE + tensor (extents_type const& e, const value_type &i) + : tensor_expression_type() //tensor_container () + , extents_ (e) + , strides_ (extents_) + , data_ (extents_.product(), i) + {} + + + + /** @brief Constructs a tensor from another tensor + * + * @param v tensor to be copied. + */ + BOOST_UBLAS_INLINE + tensor (const tensor &v) + : tensor_expression_type() + , extents_ (v.extents_) + , strides_ (v.strides_) + , data_ (v.data_ ) + {} + + + + /** @brief Constructs a tensor from another tensor + * + * @param v tensor to be moved. 
+ */ + BOOST_UBLAS_INLINE + tensor (tensor &&v) + : tensor_expression_type() //tensor_container () + , extents_ (std::move(v.extents_)) + , strides_ (std::move(v.strides_)) + , data_ (std::move(v.data_ )) + {} + + + /** @brief Constructs a tensor with a matrix + * + * \note Initially the tensor will be two-dimensional. + * + * @param v matrix to be copied. + */ + BOOST_UBLAS_INLINE + tensor (const matrix_type &v) + : tensor_expression_type() + , extents_ () + , strides_ () + , data_ (v.data()) + { + if(!data_.empty()){ + extents_ = extents_type{v.size1(),v.size2()}; + strides_ = strides_type(extents_); + } + } + + /** @brief Constructs a tensor with a matrix + * + * \note Initially the tensor will be two-dimensional. + * + * @param v matrix to be moved. + */ + BOOST_UBLAS_INLINE + tensor (matrix_type &&v) + : tensor_expression_type() + , extents_ {} + , strides_ {} + , data_ {} + { + if(v.size1()*v.size2() != 0){ + extents_ = extents_type{v.size1(),v.size2()}; + strides_ = strides_type(extents_); + data_ = std::move(v.data()); + } + } + + /** @brief Constructs a tensor using a \c vector + * + * @note It is assumed that vector is column vector + * @note Initially the tensor will be one-dimensional. + * + * @param v vector to be copied. + */ + BOOST_UBLAS_INLINE + tensor (const vector_type &v) + : tensor_expression_type() + , extents_ () + , strides_ () + , data_ (v.data()) + { + if(!data_.empty()){ + extents_ = extents_type{data_.size(),1}; + strides_ = strides_type(extents_); + } + } + + /** @brief Constructs a tensor using a \c vector + * + * @param v vector to be moved. 
+ */ + BOOST_UBLAS_INLINE + tensor (vector_type &&v) + : tensor_expression_type() + , extents_ {} + , strides_ {} + , data_ {} + { + if(v.size() != 0){ + extents_ = extents_type{v.size(),1}; + strides_ = strides_type(extents_); + data_ = std::move(v.data()); + } + } + + + /** @brief Constructs a tensor with another tensor with a different layout + * + * @param other tensor with a different layout to be copied. + */ + BOOST_UBLAS_INLINE + template + tensor (const tensor &other) + : tensor_expression_type () + , extents_ (other.extents()) + , strides_ (other.extents()) + , data_ (other.extents().product()) + { + copy(this->rank(), this->extents().data(), + this->data(), this->strides().data(), + other.data(), other.strides().data()); + } + + /** @brief Constructs a tensor with an tensor expression + * + * @code tensor A = B + 3 * C; @endcode + * + * @note type must be specified of tensor must be specified. + * @note dimension extents are extracted from tensors within the expression. + * + * @param expr tensor expression + */ + BOOST_UBLAS_INLINE + template + tensor (const tensor_expression_type &expr) + : tensor_expression_type () + , extents_ ( detail::retrieve_extents(expr) ) + , strides_ ( extents_ ) + , data_ ( extents_.product() ) + { + static_assert( detail::has_tensor_types>::value, + "Error in boost::numeric::ublas::tensor: expression does not contain a tensor. cannot retrieve shape."); + detail::eval( *this, expr ); + } + + /** @brief Constructs a tensor with a matrix expression + * + * @code tensor A = B + 3 * C; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. 
+ * @note extents are automatically extracted from the temporary matrix + * + * @param expr matrix expression + */ + BOOST_UBLAS_INLINE + template + tensor (const matrix_expression_type &expr) + : tensor( matrix_type ( expr ) ) + { + } + + /** @brief Constructs a tensor with a vector expression + * + * @code tensor A = b + 3 * b; @endcode + * + * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. + * @note extents are automatically extracted from the temporary matrix + * + * @param expr vector expression + */ + BOOST_UBLAS_INLINE + template + tensor (const vector_expression_type &expr) + : tensor( vector_type ( expr ) ) + { + } + + /** @brief Evaluates the tensor_expression and assigns the results to the tensor + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor. + * + * @param expr expression that is evaluated. + */ + BOOST_UBLAS_INLINE + template + tensor &operator = (const tensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + + tensor& operator=(tensor other) + { + swap (*this, other); + return *this; + } + + tensor& operator=(const_reference v) + { + std::fill(this->begin(), this->end(), v); + return *this; + } +#endif + + + /** @brief Returns true if the subtensor is empty (\c size==0) */ + BOOST_UBLAS_INLINE + bool empty () const { + return this->size() == size_type(0); + } + + + /** @brief Returns the size of the subtensor */ + BOOST_UBLAS_INLINE + size_type size () const { + return product(this->extents_); + } + + /** @brief Returns the size of the subtensor */ + BOOST_UBLAS_INLINE + size_type size (size_type r) const { + return this->extents_.at(r); + } + + /** @brief Returns the number of dimensions/modes of the subtensor */ + BOOST_UBLAS_INLINE + size_type rank () const { + return this->extents_.size(); + } + + /** @brief Returns the number of dimensions/modes of the subtensor */ + 
BOOST_UBLAS_INLINE + size_type order () const { + return this->extents_.size(); + } + + /** @brief Returns the strides of the subtensor */ + BOOST_UBLAS_INLINE + auto const& strides () const { + return this->strides_; + } + + /** @brief Returns the span strides of the subtensor */ + BOOST_UBLAS_INLINE + auto const& span_strides () const { + return this->span_strides_; + } + + /** @brief Returns the span strides of the subtensor */ + BOOST_UBLAS_INLINE + auto const& spans () const { + return this->spans_; + } + + + /** @brief Returns the extents of the subtensor */ + BOOST_UBLAS_INLINE + auto const& extents () const { + return this->extents_; + } + + + /** @brief Returns a \c const reference to the container. */ + BOOST_UBLAS_INLINE + const_pointer data () const { + return this->data_; + } + + /** @brief Returns a \c const reference to the container. */ + BOOST_UBLAS_INLINE + pointer data () { + return this->data_; + } + + + + +#if 0 + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + const_reference operator [] (size_type i) const { + return this->data_[i]; + } + + /** @brief Element access using a single index. + * + * + * @code A[i] = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + reference operator [] (size_type i) + { + return this->data_[i]; + } + + + /** @brief Element access using a multi-index or single-index. + * + * + * @code auto a = A.at(i,j,k); @endcode or + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + template + BOOST_UBLAS_INLINE + const_reference at (size_type i, size_types ... 
is) const { + if constexpr (sizeof...(is) == 0) + return this->data_[i]; + else + return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward(is)...)]; + } + + /** @brief Element access using a multi-index or single-index. + * + * + * @code A.at(i,j,k) = a; @endcode or + * @code A.at(i) = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + BOOST_UBLAS_INLINE + template + reference at (size_type i, size_types ... is) { + if constexpr (sizeof...(is) == 0) + return this->data_[i]; + else + return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward(is)...)]; + } + + + + + /** @brief Element access using a single index. + * + * + * @code A(i) = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + const_reference operator()(size_type i) const { + return this->data_[i]; + } + + + /** @brief Element access using a single index. + * + * @code A(i) = a; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + BOOST_UBLAS_INLINE + reference operator()(size_type i){ + return this->data_[i]; + } + + + + + /** @brief Generates a tensor index for tensor contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() + */ + BOOST_UBLAS_INLINE + template + decltype(auto) operator() (index::index_type p, index_types ... ps) const + { + constexpr auto N = sizeof...(ps)+1; + if( N != this->rank() ) + throw std::runtime_error("Error in boost::numeric::ublas::operator(): size of provided index_types does not match with the rank."); + + return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... 
) ); + } + + + + + + /** @brief Reshapes the tensor + * + * + * (1) @code A.reshape(extents{m,n,o}); @endcode or + * (2) @code A.reshape(extents{m,n,o},4); @endcode + * + * If the size of this smaller than the specified extents than + * default constructed (1) or specified (2) value is appended. + * + * @note rank of the tensor might also change. + * + * @param e extents with which the tensor is reshaped. + * @param v value which is appended if the tensor is enlarged. + */ + BOOST_UBLAS_INLINE + void reshape (extents_type const& e, value_type v = value_type{}) + { + this->extents_ = e; + this->strides_ = strides_type(this->extents_); + + if(e.product() != this->size()) + this->data_.resize (this->extents_.product(), v); + } + + + friend void swap(tensor& lhs, tensor& rhs) { + std::swap(lhs.data_ , rhs.data_ ); + std::swap(lhs.extents_, rhs.extents_); + std::swap(lhs.strides_, rhs.strides_); + } + + + /// \brief return an iterator on the first element of the tensor + BOOST_UBLAS_INLINE + const_iterator begin () const { + return data_.begin (); + } + + /// \brief return an iterator on the first element of the tensor + BOOST_UBLAS_INLINE + const_iterator cbegin () const { + return data_.cbegin (); + } + + /// \brief return an iterator after the last element of the tensor + BOOST_UBLAS_INLINE + const_iterator end () const { + return data_.end(); + } + + /// \brief return an iterator after the last element of the tensor + BOOST_UBLAS_INLINE + const_iterator cend () const { + return data_.cend (); + } + + /// \brief Return an iterator on the first element of the tensor + BOOST_UBLAS_INLINE + iterator begin () { + return data_.begin(); + } + + /// \brief Return an iterator at the end of the tensor + BOOST_UBLAS_INLINE + iterator end () { + return data_.end(); + } + + /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. 
end() of normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator rbegin () const { + return data_.rbegin(); + } + + /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator crbegin () const { + return data_.crbegin(); + } + + /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator rend () const { + return data_.rend(); + } + + /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) + BOOST_UBLAS_INLINE + const_reverse_iterator crend () const { + return data_.crend(); + } + + /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) + BOOST_UBLAS_INLINE + reverse_iterator rbegin () { + return data_.rbegin(); + } + + /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) + BOOST_UBLAS_INLINE + reverse_iterator rend () { + return data_.rend(); + } + + +#if 0 + // ------------- + // Serialization + // ------------- + + /// Serialize a tensor into and archive as defined in Boost + /// \param ar Archive object. 
Can be a flat file, an XML file or any other stream + /// \param file_version Optional file version (not yet used) + template + void serialize(Archive & ar, const unsigned int /* file_version */){ + ar & serialization::make_nvp("data",data_); + } +#endif + +#endif + +private: + + std::vector spans_; + extents_type extents_; + strides_type strides_; + strides_type span_strides_; + pointer data_; +}; + + +} // namespaces + + + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp new file mode 100644 index 000000000..6c42763d0 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -0,0 +1,210 @@ +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which firsted as a Google Summer of Code project. +// + + +/// \file subtensor_utility.hpp Definition for the tensor template class + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ + +#include +#include +#include + +#include +#include +#include + + +namespace boost::numeric::ublas::detail { + + +/*! 
@brief Computes span strides for a subtensor + * + * span stride v is computed according to: v[i] = w[i]*s[i], where + * w[i] is the i-th stride of the tensor + * s[i] is the step size of the i-th span + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto compute_span_strides(strides_type const& strides, spans_type const& spans) +{ + if(strides.size() != spans.size()) + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::compute_span_strides(): tensor strides.size() != spans.size()"); + + using base_type = typename strides_type::base_type; + auto span_strides = base_type(spans.size()); + + std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), + [](auto w, auto const& s) { return w * s.step(); } ); + + return strides_type( span_strides ); +} + +/*! @brief Computes the data pointer offset for a subtensor + * + * offset is computed according to: sum ( f[i]*w[i] ), where + * f[i] is the first element of the i-th span + * w[i] is the i-th stride of the tensor + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto compute_offset(strides_type const& strides, spans_type const& spans) +{ + if(strides.size() != spans.size()) + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); + + using value_type = typename strides_type::value_type; + + return std::inner_product(spans.begin(), spans.end(), strides.begin(), value_type(0), + std::plus(), [](auto const& s, value_type w) {return s.first() * w; } ); +} + + +/*! @brief Computes the extents of the subtensor. 
+ * + * i-th extent is given by span[i].size() + * + * @param[in] spans vector of spans of the subtensor + */ +template +auto compute_extents(spans_type const& spans) +{ + using extents_t = basic_extents; + using base_type = typename extents_t::base_type; + if(spans.empty()) + return extents_t{}; + auto extents = base_type(spans.size()); + std::transform(spans.begin(), spans.end(), extents.begin(), [](auto const& s) { return s.size(); } ); + return extents_t( extents ); +} + + +/*! @brief Auxiliary function for subtensor which possibly transforms a span instance + * + * transform_span(span() ,4) -> span(0,3) + * transform_span(span(1,1) ,4) -> span(1,1) + * transform_span(span(1,3) ,4) -> span(1,3) + * transform_span(span(2,end),4) -> span(2,3) + * transform_span(span(end) ,4) -> span(3,3) + * + * @note span is zero-based indexed. + * + * @param[in] s span that is going to be transformed + * @param[in] extent extent that is maybe used for the tranformation + */ +template +auto transform_span(span const& s, size_type const extent) +{ + using span_type = span; + + size_type first = s.first(); + size_type last = s.last (); + size_type size = s.size (); + + auto const extent0 = extent-1; + + auto constexpr is_sliced = std::is_same::value; + + + if constexpr ( is_sliced ){ + if(size == 0) return span_type(0 , extent0); + else if(first== end) return span_type(extent0 , extent0); + else if(last == end) return span_type(first , extent0); + else return span_type(first , last ); + } + else { + size_type step = s.step (); + if(size == 0) return span_type(0 , size_type(1), extent0); + else if(first== end) return span_type(extent0 , step, extent0); + else if(last == end) return span_type(first , step, extent0); + else return span_type(first , step, last ); + } +} + + +template +void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... 
spans ); + +template +void transform_spans_impl(basic_extents const& extents, std::array& span_array, span const& s, span_types&& ... spans) +{ + std::get(span_array) = transform_span(s, extents[r]); + static constexpr auto nspans = sizeof...(spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans)...); +} + +template +void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... spans ) +{ + static constexpr auto nspans = sizeof...(spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + std::get(span_array) = transform_span(span_type(arg), extents[r]); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans) ... ); + +} + + +/*! @brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. + * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_array(basic_extents const& extents, span_types&& ... spans) +{ + constexpr static auto n = sizeof...(spans); + if(extents.size() != n) + throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); + std::array span_array; + if constexpr (n>0) + transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + return span_array; +} + + +/*! 
@brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. + * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_vector(basic_extents const& extents, span_types&& ... spans) +{ + auto span_array = generate_span_array(extents,std::forward(spans)...); + return std::vector(span_array.begin(), span_array.end()); +} + + +} // namespace boost::numeric::ublas::detail + + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/tags.hpp b/include/boost/numeric/ublas/tensor/tags.hpp index 7774f9ccb..3459b80da 100644 --- a/include/boost/numeric/ublas/tensor/tags.hpp +++ b/include/boost/numeric/ublas/tensor/tags.hpp @@ -6,11 +6,13 @@ // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // -// #ifndef BOOST_UBLAS_TENSOR_TAGS_HPP #define BOOST_UBLAS_TENSOR_TAGS_HPP +#include "../fwd.hpp" + + namespace boost::numeric::ublas{ struct tensor_tag{}; @@ -26,4 +28,16 @@ namespace boost::numeric::ublas{ } // namespace boost::numeric::ublas +namespace detail::tag { + +struct unit_access {}; +struct non_unit_access{}; + +} // namespace boost::numeric::tags + + +} // namespace boost::numeric::ublas + + #endif // BOOST_UBLAS_TENSOR_TAGS_HPP + diff --git a/include/boost/numeric/ublas/tensor/tensor.hpp b/include/boost/numeric/ublas/tensor/tensor.hpp index 02ceaa53a..9d194fe7b 100644 --- a/include/boost/numeric/ublas/tensor/tensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor.hpp @@ -20,3 +20,4 @@ #include "tensor/tensor_static.hpp" #endif // BOOST_UBLAS_TENSOR_TENSOR_HPP + diff --git a/test/tensor/test_algorithms.cpp b/test/tensor/test_algorithms.cpp index 477ee1e0c..8f1a3b225 100644 --- a/test/tensor/test_algorithms.cpp +++ b/test/tensor/test_algorithms.cpp @@ -25,7 +25,7 @@ 
BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms/*, * boost::unit_test::depends_on("test_shape_dynamic") * boost::unit_test::depends_on("test_strides")*/ ) -// BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms) +BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms) using test_types = zip>::with_t; diff --git a/test/tensor/test_span.cpp b/test/tensor/test_span.cpp new file mode 100644 index 000000000..1b1da2a63 --- /dev/null +++ b/test/tensor/test_span.cpp @@ -0,0 +1,258 @@ +// Copyright (c) 2018 Cem Bassoy +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. +// + +#include +#include +#include + + +BOOST_AUTO_TEST_SUITE( span_testsuite ); + +struct fixture { + using span_type = boost::numeric::ublas::strided_span; + + fixture() : + spans { + span_type{}, // 0 + span_type(0,0,0), // 1 + span_type(0,1,0), // 2 + span_type(0,1,2), // 3 + span_type(1,1,2), // 4 + span_type(0,2,4), // 5 + span_type(1,2,4), // 6 + span_type(1,3,5), // 7 + span_type(1,3,7) // 8 + } + {} + std::vector spans; +}; + + + +BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) +{ + using span_type = boost::numeric::ublas::strided_span; + + BOOST_CHECK_EQUAL (spans[0].first(),0); + BOOST_CHECK_EQUAL (spans[0].step (),0); + BOOST_CHECK_EQUAL (spans[0].last (),0); + BOOST_CHECK_EQUAL (spans[0].size (),0); + + BOOST_CHECK_EQUAL (spans[1].first(),0); + BOOST_CHECK_EQUAL (spans[1].step (),0); + BOOST_CHECK_EQUAL (spans[1].last (),0); + BOOST_CHECK_EQUAL (spans[1].size (),1); + + BOOST_CHECK_EQUAL (spans[2].first(),0); + BOOST_CHECK_EQUAL (spans[2].step (),1); + BOOST_CHECK_EQUAL (spans[2].last (),0); + BOOST_CHECK_EQUAL (spans[2].size (),1); + + BOOST_CHECK_EQUAL (spans[3].first(),0); + BOOST_CHECK_EQUAL (spans[3].step (),1); + BOOST_CHECK_EQUAL 
(spans[3].last (),2); + BOOST_CHECK_EQUAL (spans[3].size (),3); + + BOOST_CHECK_EQUAL (spans[4].first(),1); + BOOST_CHECK_EQUAL (spans[4].step (),1); + BOOST_CHECK_EQUAL (spans[4].last (),2); + BOOST_CHECK_EQUAL (spans[4].size (),2); + + BOOST_CHECK_EQUAL (spans[5].first(),0); + BOOST_CHECK_EQUAL (spans[5].step (),2); + BOOST_CHECK_EQUAL (spans[5].last (),4); + BOOST_CHECK_EQUAL (spans[5].size (),3); + + BOOST_CHECK_EQUAL (spans[6].first(),1); + BOOST_CHECK_EQUAL (spans[6].step (),2); + BOOST_CHECK_EQUAL (spans[6].last (),3); + BOOST_CHECK_EQUAL (spans[6].size (),2); + + BOOST_CHECK_EQUAL (spans[7].first(),1); + BOOST_CHECK_EQUAL (spans[7].step (),3); + BOOST_CHECK_EQUAL (spans[7].last (),4); + BOOST_CHECK_EQUAL (spans[7].size (),2); + + BOOST_CHECK_EQUAL (spans[8].first(),1); + BOOST_CHECK_EQUAL (spans[8].step (),3); + BOOST_CHECK_EQUAL (spans[8].last (),7); + BOOST_CHECK_EQUAL (spans[8].size (),3); + + + BOOST_CHECK_THROW ( span_type( 1,0,3 ), std::runtime_error ); + BOOST_CHECK_THROW ( span_type( 1,2,0 ), std::runtime_error ); + +} + + + +BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) +{ + using span_type = boost::numeric::ublas::strided_span; + + + BOOST_CHECK_EQUAL (span_type(spans[0]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).step (),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).size (),0); + + BOOST_CHECK_EQUAL (span_type(spans[1]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[1]).step (),0); + BOOST_CHECK_EQUAL (span_type(spans[1]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[1]).size (),1); + + BOOST_CHECK_EQUAL (span_type(spans[2]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[2]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[2]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[2]).size (),1); + + BOOST_CHECK_EQUAL (span_type(spans[3]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[3]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[3]).last (),2); + BOOST_CHECK_EQUAL 
(span_type(spans[3]).size (),3); + + BOOST_CHECK_EQUAL (span_type(spans[4]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[4]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[4]).last (),2); + BOOST_CHECK_EQUAL (span_type(spans[4]).size (),2); + + + BOOST_CHECK_EQUAL (span_type(spans[5]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[5]).step (),2); + BOOST_CHECK_EQUAL (span_type(spans[5]).last (),4); + BOOST_CHECK_EQUAL (span_type(spans[5]).size (),3); + + BOOST_CHECK_EQUAL (span_type(spans[6]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[6]).step (),2); + BOOST_CHECK_EQUAL (span_type(spans[6]).last (),3); + BOOST_CHECK_EQUAL (span_type(spans[6]).size (),2); + + BOOST_CHECK_EQUAL (span_type(spans[7]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[7]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[7]).last (),4); + BOOST_CHECK_EQUAL (span_type(spans[7]).size (),2); + + BOOST_CHECK_EQUAL (span_type(spans[8]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[8]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[8]).last (),7); + BOOST_CHECK_EQUAL (span_type(spans[8]).size (),3); + + +} + + +BOOST_FIXTURE_TEST_CASE( assignment_operator_test, fixture ) +{ + auto c0 = spans[1]; + BOOST_CHECK_EQUAL ((c0=spans[0]).first(),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).step (),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).last (),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).size (),0); + + auto c1 = spans[2]; + BOOST_CHECK_EQUAL ((c1=spans[1]).first(),0); + BOOST_CHECK_EQUAL ((c1=spans[1]).step (),0); + BOOST_CHECK_EQUAL ((c1=spans[1]).last (),0); + BOOST_CHECK_EQUAL ((c1=spans[1]).size (),1); + + auto c2 = spans[3]; + BOOST_CHECK_EQUAL ((c2=spans[2]).first(),0); + BOOST_CHECK_EQUAL ((c2=spans[2]).step (),1); + BOOST_CHECK_EQUAL ((c2=spans[2]).last (),0); + BOOST_CHECK_EQUAL ((c2=spans[2]).size (),1); + + auto c3 = spans[4]; + BOOST_CHECK_EQUAL ((c3=spans[3]).first(),0); + BOOST_CHECK_EQUAL ((c3=spans[3]).step (),1); + BOOST_CHECK_EQUAL ((c3=spans[3]).last (),2); + BOOST_CHECK_EQUAL 
((c3=spans[3]).size (),3); + + auto c4 = spans[5]; + BOOST_CHECK_EQUAL ((c4=spans[4]).first(),1); + BOOST_CHECK_EQUAL ((c4=spans[4]).step (),1); + BOOST_CHECK_EQUAL ((c4=spans[4]).last (),2); + BOOST_CHECK_EQUAL ((c4=spans[4]).size (),2); + + auto c5 = spans[6]; + BOOST_CHECK_EQUAL ((c5=spans[5]).first(),0); + BOOST_CHECK_EQUAL ((c5=spans[5]).step (),2); + BOOST_CHECK_EQUAL ((c5=spans[5]).last (),4); + BOOST_CHECK_EQUAL ((c5=spans[5]).size (),3); + + auto c6 = spans[7]; + BOOST_CHECK_EQUAL ((c6=spans[6]).first(),1); + BOOST_CHECK_EQUAL ((c6=spans[6]).step (),2); + BOOST_CHECK_EQUAL ((c6=spans[6]).last (),3); + BOOST_CHECK_EQUAL ((c6=spans[6]).size (),2); + + auto c7 = spans[8]; + BOOST_CHECK_EQUAL ((c7=spans[7]).first(),1); + BOOST_CHECK_EQUAL ((c7=spans[7]).step (),3); + BOOST_CHECK_EQUAL ((c7=spans[7]).last (),4); + BOOST_CHECK_EQUAL ((c7=spans[7]).size (),2); + +} + +BOOST_FIXTURE_TEST_CASE( access_operator_test, fixture ) +{ + + BOOST_CHECK_EQUAL(spans[0][0], 0); + + BOOST_CHECK_EQUAL(spans[1][0], 0); + + BOOST_CHECK_EQUAL(spans[2][0], 0); + + BOOST_CHECK_EQUAL(spans[3][0], 0); + BOOST_CHECK_EQUAL(spans[3][1], 1); + BOOST_CHECK_EQUAL(spans[3][2], 2); + + BOOST_CHECK_EQUAL(spans[4][0], 1); + BOOST_CHECK_EQUAL(spans[4][1], 2); + + BOOST_CHECK_EQUAL(spans[5][0], 0); + BOOST_CHECK_EQUAL(spans[5][1], 2); + BOOST_CHECK_EQUAL(spans[5][2], 4); + + BOOST_CHECK_EQUAL(spans[6][0], 1); + BOOST_CHECK_EQUAL(spans[6][1], 3); + + BOOST_CHECK_EQUAL(spans[7][0], 1); + BOOST_CHECK_EQUAL(spans[7][1], 4); + + BOOST_CHECK_EQUAL(spans[8][0], 1); + BOOST_CHECK_EQUAL(spans[8][1], 4); + BOOST_CHECK_EQUAL(spans[8][2], 7); + +} + +BOOST_FIXTURE_TEST_CASE( ran_test, fixture ) +{ + using namespace boost::numeric::ublas; + + BOOST_CHECK ( ( ran(0,0,0) == spans[0]) ); + + BOOST_CHECK ( ( ran(0,1,0) == spans[2]) ); + BOOST_CHECK ( ( ran(0, 0) == spans[2]) ); + + + BOOST_CHECK ( ( ran(0,1,2) == spans[3]) ); + BOOST_CHECK ( ( ran(0, 2) == spans[3]) ); + + BOOST_CHECK ( ( ran(1,1,2) == spans[4]) 
); + BOOST_CHECK ( ( ran(1, 2) == spans[4]) ); + + BOOST_CHECK ( ( ran(0,2,4) == spans[5]) ); + BOOST_CHECK ( ( ran(1,2,4) == spans[6]) ); + BOOST_CHECK ( ( ran(1,3,5) == spans[7]) ); + BOOST_CHECK ( ( ran(1,3,7) == spans[8]) ); +} + +BOOST_AUTO_TEST_SUITE_END(); diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp new file mode 100644 index 000000000..141496fa1 --- /dev/null +++ b/test/tensor/test_subtensor.cpp @@ -0,0 +1,575 @@ +// Copyright (c) 2018 Cem Bassoy +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. +// + + +#include +#include + +#include "utility.hpp" +#include +#include +#include +#include + + + +BOOST_AUTO_TEST_SUITE ( subtensor_testsuite/*, + *boost::unit_test::depends_on("tensor_testsuite") + *boost::unit_test::depends_on("span_testsuite") + *boost::unit_test::depends_on("subtensor_utility_testsuite")*/) ; + +// double,std::complex + + + +using test_types = zip::with_t; + + + +struct fixture_shape +{ + using shape = boost::numeric::ublas::basic_extents; + + fixture_shape() : extents{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,1}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{4,1,3}, // 6 + shape{1,2,3}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,5} // 9 + } + {} + std::vector extents; +}; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixture_shape ) +{ + + namespace ub = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::dynamic_tensor; + using subtensor_type = ub::subtensor; + + + auto check = [](auto const& e) { + auto t = tensor_type{e}; + auto s = subtensor_type(t); + BOOST_CHECK_EQUAL ( s.size() 
, t.size() ); + BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); + if(e.empty()) { + BOOST_CHECK_EQUAL ( s.empty(), t.empty() ); + BOOST_CHECK_EQUAL ( s. data(), t. data() ); + } + else{ + BOOST_CHECK_EQUAL ( !s.empty(), !t.empty() ); + BOOST_CHECK_EQUAL ( s. data(), t. data() ); + } + }; + + for(auto const& e : extents) + check(e); + +} + + + +BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) +{ + + namespace ub = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::dynamic_tensor; + using subtensor_type = ub::subtensor; + using span = ub::sliced_span; + + + { + auto A = tensor_type{}; + auto Asub = subtensor_type( A ); + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + + + + { + auto A = tensor_type{1,1}; + auto Asub = subtensor_type( A, 0, 0 ); + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + + + { + auto A = tensor_type{1,2}; + auto Asub = subtensor_type( A, 0, span{} ); + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + { + auto A = tensor_type{1,2}; + auto Asub = subtensor_type( A, 0, 1 ); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + 
Asub.spans().at(1).first()*A.strides().at(1) ); + } + + + { + auto A = tensor_type{2,3}; + auto Asub = subtensor_type( A, 0, 1 ); + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } + + { + auto A = tensor_type{4,3}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::end) ); + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } + + { + auto A = tensor_type{4,3,5}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::end), span(2,4) ); + + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(2) , 3 ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + 
BOOST_CHECK_EQUAL( Asub.strides().at(2), B.strides().at(2) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1)+ + Asub.spans().at(2).first()*A.strides().at(2)); + } + +} + +#if 0 + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_copy_ctor_test, value, test_types, fixture_shape ) +{ + namespace ub = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::dynamic_tensor; + using subtensor_type = ub::subtensor; + using span = ub::sliced_span; + + auto check = [](auto const& e) + { + auto r = tensor_type{e}; + auto t = r; + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + BOOST_CHECK ( t.strides() == r.strides() ); + BOOST_CHECK ( t.extents() == r.extents() ); + + if(e.empty()) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r[i] ); + }; + + for(auto const& e : extents) + check(e); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture_shape ) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + using other_layout_type = std::conditional_t::value, ublas::tag::last_order, ublas::tag::first_order>; + using other_tensor_type = ublas::tensor; + + + for(auto const& e : extents) + { + auto r = tensor_type{e}; + other_tensor_type t = r; + tensor_type q = t; + + BOOST_CHECK_EQUAL ( t.size() , r.size() ); + BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); + BOOST_CHECK ( t.extents() == r.extents() ); + + BOOST_CHECK_EQUAL ( q.size() , r.size() ); + BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); + BOOST_CHECK ( q.strides() == 
r.strides() ); + BOOST_CHECK ( q.extents() == r.extents() ); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( q[i], r[i] ); + } +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + auto check = [](auto const& e) + { + auto r = tensor_type{e}; + auto t = std::move(r); + BOOST_CHECK_EQUAL ( t.size() , e.product() ); + BOOST_CHECK_EQUAL ( t.rank() , e.size() ); + + if(e.empty()) { + BOOST_CHECK ( t.empty() ); + BOOST_CHECK_EQUAL ( t.data() , nullptr); + } + else{ + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + } + + }; + + for(auto const& e : extents) + check(e); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + std::random_device device{}; + std::minstd_rand0 generator(device()); + + using distribution_type = std::conditional_t, std::uniform_int_distribution<>, std::uniform_real_distribution<> >; + auto distribution = distribution_type(1,6); + + for(auto const& e : extents){ + auto r = static_cast(distribution(generator)); + auto t = tensor_type{e,r}; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], r ); + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + using array_type = typename tensor_type::array_type; + + for(auto const& e : extents) { + auto a = array_type(e.product()); + auto v = value_type {}; + + for(auto& aa : a){ + aa = v; + v += 
value_type{1}; + } + auto t = tensor_type{e, a}; + v = value_type{}; + + for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) + BOOST_CHECK_EQUAL( t[i], v); + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + for(auto const& e : extents) { + auto t = tensor_type{e}; + auto v = value_type {}; + for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}){ + t[i] = v; + BOOST_CHECK_EQUAL( t[i], v ); + + t(i) = v; + BOOST_CHECK_EQUAL( t(i), v ); + } + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + auto check1 = [](const tensor_type& t) + { + auto v = value_type{}; + for(auto k = 0ul; k < t.size(); ++k){ + BOOST_CHECK_EQUAL(t[k], v); + v+=value_type{1}; + } + }; + + auto check2 = [](const tensor_type& t) + { + std::array k; + auto r = std::is_same_v ? 1 : 0; + auto q = std::is_same_v ? 1 : 0; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); + v+=value_type{1}; + } + } + }; + + auto check3 = [](const tensor_type& t) + { + std::array k; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 
2 : 0; + auto o = op_type{}; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); + v+=value_type{1}; + } + } + } + }; + + auto check4 = [](const tensor_type& t) + { + std::array k; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 3 : 0; + auto o = op_type{}; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); + v+=value_type{1}; + } + } + } + } + }; + + auto check = [check1,check2,check3,check4](auto const& e) { + auto t = tensor_type{e}; + auto v = value_type {}; + for(auto i = 0ul; i < t.size(); ++i){ + t[i] = v; + v+=value_type{1}; + } + + if(t.rank() == 1) check1(t); + else if(t.rank() == 2) check2(t); + else if(t.rank() == 3) check3(t); + else if(t.rank() == 4) check4(t); + + }; + + for(auto const& e : extents) + check(e); +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + + for(auto const& efrom : extents){ + for(auto const& eto : extents){ + + auto v = value_type {}; + v+=value_type{1}; + auto t = tensor_type{efrom, v}; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], v ); + + t.reshape(eto); + for(auto i = 0ul; i < std::min(efrom.product(),eto.product()); ++i) + BOOST_CHECK_EQUAL( t[i], v ); + + BOOST_CHECK_EQUAL ( t.size() , eto.product() ); + BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); + BOOST_CHECK ( t.extents() == eto ); + + 
if(efrom != eto){ + for(auto i = efrom.product(); i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], value_type{} ); + } + } + } +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + for(auto const& e_t : extents){ + for(auto const& e_r : extents) { + + auto v = value_type {} + value_type{1}; + auto w = value_type {} + value_type{2}; + auto t = tensor_type{e_t, v}; + auto r = tensor_type{e_r, w}; + + std::swap( r, t ); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL( t[i], w ); + + BOOST_CHECK_EQUAL ( t.size() , e_r.product() ); + BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); + BOOST_CHECK ( t.extents() == e_r ); + + for(auto i = 0ul; i < r.size(); ++i) + BOOST_CHECK_EQUAL( r[i], v ); + + BOOST_CHECK_EQUAL ( r.size() , e_t.product() ); + BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); + BOOST_CHECK ( r.extents() == e_t ); + + + } + } +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) +{ + using namespace boost::numeric; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor; + + for(auto const& e : extents) + { + auto v = value_type {} + value_type{1}; + auto t = tensor_type{e, v}; + + BOOST_CHECK_EQUAL( std::distance(t.begin(), t.end ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); + + BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); + BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); + + if(t.size() > 0) { + BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; + BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; + } + } +} + +#endif + +BOOST_AUTO_TEST_SUITE_END(); diff --git a/test/tensor/test_subtensor_utility.cpp 
b/test/tensor/test_subtensor_utility.cpp new file mode 100644 index 000000000..e8c5aed19 --- /dev/null +++ b/test/tensor/test_subtensor_utility.cpp @@ -0,0 +1,394 @@ +// Copyright (c) 2018 Cem Bassoy +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. +// + + +#include +#include + +#include "utility.hpp" +#include +#include +#include +#include +#include + + + +BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) ; + + + +struct fixture_sliced_span { + using span_type = boost::numeric::ublas::sliced_span; + + fixture_sliced_span() + : spans{ + span_type(), // 0, a(:) + span_type(0,0), // 1, a(0:0) + span_type(0,2), // 2, a(0:2) + span_type(1,1), // 3, a(1:1) + span_type(1,3), // 4, a(1:3) + span_type(1,boost::numeric::ublas::end), // 5, a(1:end) + span_type(boost::numeric::ublas::end) // 6, a(end) + } + {} + std::vector spans; +}; + + +BOOST_FIXTURE_TEST_CASE( transform_sliced_span_test, fixture_sliced_span ) +{ + + using namespace boost::numeric; + +// template + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::sliced_span(0,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::sliced_span(0,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::sliced_span(0,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::sliced_span(0,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::sliced_span(0,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::sliced_span(0,0) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::sliced_span(0,2) ); + 
BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::sliced_span(0,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::sliced_span(0,2) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::sliced_span(1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::sliced_span(1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::sliced_span(1,1) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::sliced_span(1,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::sliced_span(1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::sliced_span(1,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::sliced_span(1,5) ); + + + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::sliced_span(3,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::sliced_span(4,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::sliced_span(5,5) ); +} + + +struct fixture_strided_span { + using span_type = boost::numeric::ublas::strided_span; + + fixture_strided_span() + : spans{ + span_type(), // 0, a(:) + span_type(0,1,0), // 1, a(0:1:0) + span_type(0,2,2), // 2, a(0:2:2) + span_type(1,1,1), // 3, a(1:1:1) + span_type(1,1,3), // 4, a(1:1:3) + span_type(1,2,boost::numeric::ublas::end), // 5, a(1:2:end) + span_type(boost::numeric::ublas::end) // 6, a(end) + } + {} + std::vector spans; +}; + + +BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, 
fixture_strided_span ) +{ + + using namespace boost::numeric; + +// template + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::strided_span(0,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::strided_span(0,1,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::strided_span(0,1,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::strided_span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::strided_span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::strided_span(0,1,0) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::strided_span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::strided_span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::strided_span(0,2,2) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::strided_span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::strided_span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::strided_span(1,1,1) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::strided_span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::strided_span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::strided_span(1,1,3) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::strided_span(1,2,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::strided_span(1,2,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) 
== ublas::strided_span(1,2,5) ); + + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::strided_span(3,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::strided_span(4,1,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::strided_span(5,1,5) ); +} + + + + + + +struct fixture_shape { + using shape = boost::numeric::ublas::basic_extents; + + fixture_shape() : extents{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,1}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{4,1,3}, // 6 + shape{1,2,3}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,5} // 9 + } + {} + std::vector extents; +}; + +BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) +{ + using namespace boost::numeric::ublas; + using span = sliced_span; + + // shape{} + { + auto v = detail::generate_span_array(extents[0]); + auto r = std::vector{}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + + // shape{1,1} + { + auto v = detail::generate_span_array(extents[1],span(),span()); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,1} + { + auto v = detail::generate_span_array(extents[1],end,span(end)); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,1} + { + auto v = detail::generate_span_array(extents[1],0,end); + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,2} + { + auto v = detail::generate_span_array(extents[2],0,end); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), 
r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{1,2} + { + auto v = detail::generate_span_array(extents[2],0,1); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = detail::generate_span_array(extents[2],span(),span()); + auto r = std::vector{span(0,0),span(0,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{2,3} + { + auto v = detail::generate_span_array(extents[4],span(),span()); + auto r = std::vector{span(0,1),span(0,2)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = detail::generate_span_array(extents[4],1,span(1,end)); + auto r = std::vector{span(1,1),span(1,2)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + // shape{2,3,1} + { + auto v = detail::generate_span_array(extents[5],span(),span(),0); + auto r = std::vector{span(0,1),span(0,2),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } + + { + auto v = detail::generate_span_array(extents[5],1,span(),end); + auto r = std::vector{span(1,1),span(0,2),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } +} + + + +struct fixture_span_vector_shape { + using shape = boost::numeric::ublas::basic_extents; + using span = boost::numeric::ublas::sliced_span; + + + fixture_span_vector_shape() + : extents_{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,3}, // 3 + shape{4,2,3}, // 4 + shape{4,2,3,5} // 5 + } + , span_vectors_{ + /*A(:)*/ boost::numeric::ublas::detail::generate_span_array(extents_[0]), + /*A(0,0)*/ 
boost::numeric::ublas::detail::generate_span_array(extents_[1],0,0), + /*A(0,:)*/ boost::numeric::ublas::detail::generate_span_array(extents_[2],0,span()), + /*A(1,1:2)*/ boost::numeric::ublas::detail::generate_span_array(extents_[3],1,span(1,2)), + /*A(1:3,1,1:2)*/ boost::numeric::ublas::detail::generate_span_array(extents_[4],span(1,3),1,span(0,1)), + /*A(1:3,1,0:1,2:4)*/ boost::numeric::ublas::detail::generate_span_array(extents_[5],span(1,3),1,span(0,1),span(2,4)), + } + , reference_ { + shape{}, + shape{1,1}, + shape{1,2}, + shape{1,2}, + shape{3,1,2}, + shape{3,1,2,3} + } + { + assert(extents_.size() == reference_.size()); + assert(extents_.size() == std::tuple_size_v); + } + std::array extents_; + std::tuple< + std::array, + std::array, + std::array, + std::array, + std::array, + std::array + > span_vectors_; + + std::array reference_; +}; + + + +BOOST_FIXTURE_TEST_CASE( extents_test, fixture_span_vector_shape ) +{ + using namespace boost::numeric; + + BOOST_CHECK ( std::equal( std::get<0>(reference_).begin(), std::get<0>(reference_).end(), ublas::detail::compute_extents( std::get<0>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<1>(reference_).begin(), std::get<1>(reference_).end(), ublas::detail::compute_extents( std::get<1>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<2>(reference_).begin(), std::get<2>(reference_).end(), ublas::detail::compute_extents( std::get<2>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<3>(reference_).begin(), std::get<3>(reference_).end(), ublas::detail::compute_extents( std::get<3>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<4>(reference_).begin(), std::get<4>(reference_).end(), ublas::detail::compute_extents( std::get<4>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( std::get<5>(reference_).begin(), std::get<5>(reference_).end(), ublas::detail::compute_extents( std::get<5>(span_vectors_) ).begin() ) ); + +} + + +using test_types = 
std::tuple; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_vector_shape ) +{ + using namespace boost::numeric; + using strides = ublas::basic_strides; + + { + auto s = std::get<0>(span_vectors_); + auto w = strides( std::get<0>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, 0 ); + } + + { + auto s = std::get<1>(span_vectors_); + auto w = strides( std::get<1>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, 0 ); + } + + { + auto s = std::get<2>(span_vectors_); + auto w = strides( std::get<2>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, 0 ); + } + + { + auto s = std::get<3>(span_vectors_); + auto w = strides( std::get<3>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] ); + } + + { + auto s = std::get<4>(span_vectors_); + auto w = strides( std::get<4>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] ); + } + + + { + auto s = std::get<5>(span_vectors_); + auto w = ( std::get<5>(extents_) ); + auto o = ublas::detail::compute_offset(w,s); + BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] + s[3].first()*w[3] ); + } + +} + + +#if 0 + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( span_strides_test, layout, test_types, fixture_span_vector_shape ) +{ + + /*A(:)*/ + /*A(0,0)*/ + /*A(0,:)*/ + /*A(1,1:2)*/ + /*A(1:3,1,1:2)*/ + /*A(1:3,1,0:1,2:4)*/ + + + // auto span_strides(strides_type const& strides, std::vector const& spans) + + using namespace boost::numeric; + using strides = ublas::basic_strides; + + for(unsigned k = 0; k < span_vectors_.size(); ++k) + { + auto s = span_vectors_[k]; + auto w = strides( extents_[k] ); + auto ss = ublas::detail::span_strides( w, s ); + for(unsigned i = 0; i < w.size(); ++i) + BOOST_CHECK_EQUAL( ss[i], 
w[i]*s[i].step() ); + } + +} + +#endif + +BOOST_AUTO_TEST_SUITE_END(); From bd7a7add6c7387e7250581cdc29ee26355e8e094 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Thu, 4 Jun 2020 19:58:32 +0200 Subject: [PATCH 03/40] added addtional subtensor test. --- IDEs/qtcreator/include/tensor/tensor.pri | 1 - IDEs/qtcreator/test/test_tensor.pro | 1 + .../boost/numeric/ublas/tensor/subtensor.hpp | 126 ++++++------------ test/tensor/test_subtensor.cpp | 58 +++++--- 4 files changed, 80 insertions(+), 106 deletions(-) diff --git a/IDEs/qtcreator/include/tensor/tensor.pri b/IDEs/qtcreator/include/tensor/tensor.pri index 91cd18b96..3dc0dc2f0 100644 --- a/IDEs/qtcreator/include/tensor/tensor.pri +++ b/IDEs/qtcreator/include/tensor/tensor.pri @@ -42,7 +42,6 @@ HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp - HEADERS += \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/inner_prod.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/function/init.hpp \ diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 856775ee4..54d20dcc1 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -37,6 +37,7 @@ HEADERS += \ $${TEST_DIR}/utility.hpp SOURCES += \ + $${TEST_DIR}/test_access.cpp \ $${TEST_DIR}/test_algorithms.cpp \ $${TEST_DIR}/test_einstein_notation.cpp \ $${TEST_DIR}/test_expression.cpp \ diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp index d03130610..5b07e9c5f 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -156,84 +156,51 @@ class subtensor > /** @brief Constructs a tensor view from a tensor without any range. 
* - * @note can be regarded as a pointer to a tensor + * @note is similar to a handle to a tensor */ explicit subtensor (tensor_type const& t) - : super_type () - , spans_() - , extents_ (t.extents()) - , strides_ (t.strides()) - , span_strides_(t.strides()) - , data_ (t.data()) + : super_type () + , spans_ () + , extents_ (t.extents()) + , strides_ (t.strides()) + , span_strides_ (t.strides()) + , data_ (t.data()) { } -#if 0 - /** @brief Constructs a tensor with a \c shape and initiates it with one-dimensional data - * - * @code tensor A{extents{4,2,3}, array }; @endcode - * - * - * @param s initial tensor dimension extents - * @param a container of \c array_type that is copied according to the storage layout - */ - BOOST_UBLAS_INLINE - tensor (extents_type const& s, const array_type &a) - : tensor_expression_type() //tensor_container() - , extents_ (s) - , strides_ (extents_) - , data_ (a) - { - if(this->extents_.product() != this->data_.size()) - throw std::runtime_error("Error in boost::numeric::ublas::tensor: size of provided data and specified extents do not match."); - } - - - - /** @brief Constructs a tensor using a shape tuple and initiates it with a value. - * - * @code tensor A{extents{4,2,3}, 1 }; @endcode - * - * @param e initial tensor dimension extents - * @param i initial value of all elements of type \c value_type - */ - BOOST_UBLAS_INLINE - tensor (extents_type const& e, const value_type &i) - : tensor_expression_type() //tensor_container () - , extents_ (e) - , strides_ (extents_) - , data_ (extents_.product(), i) - {} - /** @brief Constructs a tensor from another tensor * * @param v tensor to be copied. 
*/ - BOOST_UBLAS_INLINE - tensor (const tensor &v) - : tensor_expression_type() - , extents_ (v.extents_) - , strides_ (v.strides_) - , data_ (v.data_ ) + inline + subtensor (const subtensor &v) + : super_type () + , spans_ (v.spans_) + , extents_ (v.extents_) + , strides_ (v.strides_) + , span_strides_ (v.span_strides_) + , data_ (v.data_) {} - /** @brief Constructs a tensor from another tensor * * @param v tensor to be moved. */ BOOST_UBLAS_INLINE - tensor (tensor &&v) - : tensor_expression_type() //tensor_container () - , extents_ (std::move(v.extents_)) - , strides_ (std::move(v.strides_)) - , data_ (std::move(v.data_ )) + subtensor (subtensor &&v) + : super_type () + , spans_ (std::move(v.spans_)) + , extents_ (std::move(v.extents_)) + , strides_ (std::move(v.strides_)) + , span_strides_ (std::move(v.span_strides_)) + , data_ (std::move(v.data_)) {} +#if 0 /** @brief Constructs a tensor with a matrix * @@ -415,86 +382,74 @@ class subtensor > /** @brief Returns true if the subtensor is empty (\c size==0) */ - BOOST_UBLAS_INLINE - bool empty () const { - return this->size() == size_type(0); + inline bool empty () const { + return this->size() == 0ul; } /** @brief Returns the size of the subtensor */ - BOOST_UBLAS_INLINE - size_type size () const { + inline size_type size () const { return product(this->extents_); } /** @brief Returns the size of the subtensor */ - BOOST_UBLAS_INLINE - size_type size (size_type r) const { + inline size_type size (size_type r) const { return this->extents_.at(r); } /** @brief Returns the number of dimensions/modes of the subtensor */ - BOOST_UBLAS_INLINE - size_type rank () const { + inline size_type rank () const { return this->extents_.size(); } /** @brief Returns the number of dimensions/modes of the subtensor */ - BOOST_UBLAS_INLINE - size_type order () const { + inline size_type order () const { return this->extents_.size(); } /** @brief Returns the strides of the subtensor */ - BOOST_UBLAS_INLINE - auto const& strides () 
const { + inline auto const& strides () const { return this->strides_; } /** @brief Returns the span strides of the subtensor */ - BOOST_UBLAS_INLINE - auto const& span_strides () const { + inline auto const& span_strides () const { return this->span_strides_; } /** @brief Returns the span strides of the subtensor */ - BOOST_UBLAS_INLINE - auto const& spans () const { + inline auto const& spans () const { return this->spans_; } /** @brief Returns the extents of the subtensor */ - BOOST_UBLAS_INLINE - auto const& extents () const { + inline auto const& extents () const { return this->extents_; } /** @brief Returns a \c const reference to the container. */ - BOOST_UBLAS_INLINE - const_pointer data () const { + inline const_pointer data () const { return this->data_; } /** @brief Returns a \c const reference to the container. */ - BOOST_UBLAS_INLINE - pointer data () { + inline pointer data () { return this->data_; } -#if 0 + /** @brief Element access using a single index. * * @code auto a = A[i]; @endcode * * @param i zero-based index where 0 <= i < this->size() */ - BOOST_UBLAS_INLINE - const_reference operator [] (size_type i) const { + inline const_reference operator [] (size_type i) const { return this->data_[i]; } @@ -505,13 +460,12 @@ class subtensor > * * @param i zero-based index where 0 <= i < this->size() */ - BOOST_UBLAS_INLINE - reference operator [] (size_type i) + inline reference operator [] (size_type i) { - return this->data_[i]; + return this->data_[i]; } - +#if 0 /** @brief Element access using a multi-index or single-index. 
* * diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index 141496fa1..24788ce6b 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -21,16 +21,13 @@ -BOOST_AUTO_TEST_SUITE ( subtensor_testsuite/*, - *boost::unit_test::depends_on("tensor_testsuite") - *boost::unit_test::depends_on("span_testsuite") - *boost::unit_test::depends_on("subtensor_utility_testsuite")*/) ; +BOOST_AUTO_TEST_SUITE ( subtensor_testsuite ) ; // double,std::complex -using test_types = zip::with_t; +using test_types = zip>::with_t; @@ -213,9 +210,9 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) } -#if 0 -BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_copy_ctor_test, value, test_types, fixture_shape ) + +BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, fixture_shape ) { namespace ub = boost::numeric::ublas; using value_type = typename value::first_type; @@ -224,32 +221,55 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_copy_ctor_test, value, test_types, using subtensor_type = ub::subtensor; using span = ub::sliced_span; + + auto check = [](auto const& e) { - auto r = tensor_type{e}; - auto t = r; - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.strides() == r.strides() ); - BOOST_CHECK ( t.extents() == r.extents() ); + + auto A = tensor_type{e}; + value_type i{}; + for(auto & a : A) + a = i+=value_type{1}; + + auto Asub = subtensor_type( A ); + auto Bsub = subtensor_type( A ); + + + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + + BOOST_CHECK( Bsub.span_strides() == A.strides() ); + BOOST_CHECK( Bsub.strides() == A.strides() ); + BOOST_CHECK( Bsub.extents() == A.extents() ); + BOOST_CHECK( Bsub.data() == A.data() ); + + BOOST_CHECK_EQUAL ( Bsub.size() , A.size() ); + BOOST_CHECK_EQUAL ( 
Bsub.rank() , A.rank() ); + + if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); + BOOST_CHECK ( Bsub.empty() ); + BOOST_CHECK_EQUAL ( Bsub.data() , nullptr); } else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); + BOOST_CHECK ( !Bsub.empty() ); + BOOST_CHECK_NE ( Bsub.data() , nullptr); } - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r[i] ); + for(auto i = 0ul; i < Asub.size(); ++i) + BOOST_CHECK_EQUAL( Asub[i], Bsub[i] ); + }; for(auto const& e : extents) check(e); + } +#if 0 BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture_shape ) { From 881243a10bacba97a8d3afd9eb157c5396d2ffec Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Fri, 19 Jun 2020 08:25:23 +0200 Subject: [PATCH 04/40] access functions for tensor and subtensor access added. --- IDEs/qtcreator/include/include.pro | 1 + IDEs/qtcreator/test/test_tensor.pro | 5 + IDEs/qtcreator/ublas_develop.pro | 2 + include/boost/numeric/ublas/tensor/access.hpp | 154 ++++++++ .../ublas/tensor/detail/extents_functions.hpp | 332 ++++++++++++++++++ .../numeric/ublas/tensor/dynamic_strides.hpp | 200 +++++++++++ test/tensor/Jamfile | 7 +- test/tensor/test_access.cpp | 305 ++++++++++++++++ test/tensor/test_subtensor_utility.cpp | 8 +- 9 files changed, 1009 insertions(+), 5 deletions(-) create mode 100644 include/boost/numeric/ublas/tensor/access.hpp create mode 100644 include/boost/numeric/ublas/tensor/detail/extents_functions.hpp create mode 100644 include/boost/numeric/ublas/tensor/dynamic_strides.hpp create mode 100644 test/tensor/test_access.cpp diff --git a/IDEs/qtcreator/include/include.pro b/IDEs/qtcreator/include/include.pro index a5aeead8b..3d28ef8f6 100644 --- a/IDEs/qtcreator/include/include.pro +++ b/IDEs/qtcreator/include/include.pro @@ -4,6 +4,7 @@ TARGET = ublas CONFIG += staticlib depend_includepath CONFIG -= qt CONFIG += c++20 + INCLUDE_DIR=../../../include include(detail/detail.pri) diff 
--git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index 54d20dcc1..fffe1de57 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -5,6 +5,8 @@ CONFIG += staticlib depend_includepath console CONFIG -= qt CONFIG += c++20 +CONFIG += c++17 + #QMAKE_CXXFLAGS += -fno-inline QMAKE_CXXFLAGS =-std=c++20 QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra @@ -33,6 +35,9 @@ TEST_DIR = ../../../test/tensor include(../include/tensor/tensor.pri) +HEADERS += \ + $${TEST_DIR}/utility.hpp + HEADERS += \ $${TEST_DIR}/utility.hpp diff --git a/IDEs/qtcreator/ublas_develop.pro b/IDEs/qtcreator/ublas_develop.pro index 49fc2d99c..be5315356 100644 --- a/IDEs/qtcreator/ublas_develop.pro +++ b/IDEs/qtcreator/ublas_develop.pro @@ -3,6 +3,8 @@ CONFIG += ordered SUBDIRS = include # examples # benchmarks OTHER_FILES += ../../changelog.txt +CONFIG += c++17 +QMAKE_CXXFLAGS += -std=c++17 #include (tests.pri) diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp new file mode 100644 index 000000000..e83e588c9 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -0,0 +1,154 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer IOSB, Ettlingen, Germany +// + + +#ifndef _BOOST_UBLAS_TENSOR_ACCESS_HPP +#define _BOOST_UBLAS_TENSOR_ACCESS_HPP + + +#include +#include +#include + +namespace boost::numeric::ublas::detail{ + + +/** \brief Computes a single index from multi-index of a tensor or subtensor + * + * \param i iterator to a multi-index vector of length std::distance(begin,end) + * \param ip iterator to a multi-index vector of length std::distance(begin,end) + * \param w iterator to a stride vector of length std::distance(begin,end) or greater +*/ +template +constexpr inline auto compute_single_index(InputIt1 i, InputIt1 ip, InputIt2 w) +{ + return std::inner_product(i,ip,w,0ul,std::plus<>{},std::multiplies<>{}); +} + + +/** \brief Computes a single index from a multi-index of a tensor or subtensor + * + * \param i iterator to a multi-index vector of length std::distance(begin,end) + * \param ip iterator to a multi-index vector of length std::distance(begin,end) + * \param w iterator to a stride vector of length std::distance(begin,end) or greater +*/ +template +constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 w) +{ + if constexpr(p==0u) return 0ul; + else if constexpr(p >1u) return compute_single_index(i,i,w)+i[p-1]*w[p-1]; + else return i[p-1]*w[p-1]; +} + +/** @brief Computes a multi-index from single index of a tensor or subtensor + * + * j = compute_single_index (i, ip, w) + * compute_multi_index (j, w, wp, k) with k == i + * + * @param w begin input iterator to a container with tensor or subtensor strides of length std::distance(begin,end) + * @param wp end input iterator to a container with tensor or subtensor strides of length std::distance(begin,end) + * @param i begin output iterator to a container with tensor or subtensor indices length std::distance(begin,end) or greater +*/ 
+template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +{ + if(w == wp) + return; + auto p = std::distance(w,wp); + auto kq = j; + //auto q = 0ul; + + + + for(int r = p-1; r >= 0; --r) + { + //q = l[r]-1; + i[r] = kq/w[r]; + kq -= w[r]*i[r]; + } + + //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} + + +template +constexpr inline void compute_multi_index_first(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +{ + if(w == wp) + return; + auto p = std::distance(w,wp); + auto kq = j; + //auto q = 0ul; + + + +// for(int r = p-1; r >= 0; --r) +// { +// //q = l[r]-1; +// i[r] = kq/w[r]; +// kq -= w[r]*i[r]; +// } + + std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} + +template +constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +{ + if(w == wp) + return; + auto p = std::distance(w,wp); + auto kq = j; + + for(unsigned r = 0ul; r < p; ++r) { + i[r] = kq/w[r]; + kq -= w[r]*i[r]; + } + + //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} + + + +/** @brief Computes a single index from a multi-index of a dense tensor or subtensor + * + * @param j single index that is transformed into a multi-index + * @param w begin input iterator to a container with strides of length p + * @param i begin input iterator to a container with indices of length p or greater +*/ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i) +{ + if constexpr (p==0u) return; + else if constexpr (p >1u) {i[p-1]=j/w[p-1]; compute_multi_index(j-w[p-1]*i[p-1],w,w,i); } + else {i[p-1]=j/w[p-1]; } +} + + +/** @brief Computes a single (relative memory) index of a dense tensor from a single index of one of its subtensor + * + * @param jv single index of a subtensor that is transformed into a single index of a dense tensor + * @param w begin input iterator of a container with tensor 
strides of length std::distance(w,wp) + * @param wp end input iterator of a container with tensor strides of length std::distance(w,wp) + * @param v begin input iterator of a container with subtensor strides of length std::distance(w,wp) or greater +*/ +template +constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 wp, InputIt2 v) +{ + return std::inner_product(w,wp,v,0ul, + std::plus<>{}, + [&jv](auto ww, auto vv) { auto k=jv/vv; jv-=vv*k; return ww*k; } + ); +} + +} // namespace + +#endif diff --git a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp new file mode 100644 index 000000000..4273829d1 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp @@ -0,0 +1,332 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google +// + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ + +#include +#include +#include +#include +#include +#include +#include + +namespace boost::numeric::ublas::detail{ + + template + constexpr auto push_back(basic_static_extents) -> basic_static_extents; + + template + constexpr auto push_front(basic_static_extents) -> basic_static_extents; + + template + constexpr auto squeeze_impl_remove_one( basic_static_extents, basic_static_extents num = basic_static_extents{} ){ + // executed when basic_static_extents is size of 1 + // @code basic_static_extents @endcode + if constexpr( sizeof...(E) == 0ul ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + if constexpr( E0 == T(1) ){ + return num; + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + return decltype(push_back(num)){}; + } + }else{ + if constexpr( E0 == T(1) ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + return squeeze_impl_remove_one(basic_static_extents{}, num); + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + auto n_num_list = decltype(push_back(num)){}; + return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); + } + } + } + + template + constexpr auto squeeze_impl( basic_static_extents const& e ){ + + using extents_type = basic_static_extents; + + if constexpr( extents_type::_size <= typename extents_type::size_type(2) ){ + return e; + } + + using value_type = typename extents_type::value_type; + using size_type = typename extents_type::size_type; + + auto one_free_static_extents = squeeze_impl_remove_one(e); + + // check after removing 1s from the list are they same + // if same that means 1s does 
not exist and no need to + // squeeze + if constexpr( decltype(one_free_static_extents)::_size != extents_type::_size ){ + + // after squeezing, all the extents are 1s we need to + // return extents of (1, 1) + if constexpr( decltype(one_free_static_extents)::_size == size_type(0) ){ + + return basic_static_extents{}; + + }else if constexpr( decltype(one_free_static_extents)::_size == (1) ){ + // to comply with GNU Octave this check is made + // if position 2 contains 1 we push at back + // else we push at front + if constexpr( extents_type::at(1) == value_type(1) ){ + return decltype( push_back(one_free_static_extents) ){}; + }else{ + return decltype( push_front(one_free_static_extents) ){}; + } + + }else{ + return one_free_static_extents; + } + + }else{ + return e; + } + + } + + template + inline + constexpr auto squeeze_impl( basic_extents const& e ){ + using extents_type = basic_extents; + using base_type = typename extents_type::base_type; + using value_type = typename extents_type::value_type; + using size_type = typename extents_type::size_type; + + if( e.size() <= size_type(2) ){ + return e; + } + + auto not_one = [](auto const& el){ + return el != value_type(1); + }; + + // count non one values + size_type size = std::count_if(e.begin(), e.end(), not_one); + + // reserve space + base_type n_extents( std::max(size, size_type(2)), 1 ); + + // copying non 1s to the new extents + std::copy_if(e.begin(), e.end(), n_extents.begin(), not_one); + + // checking if extents size goes blow 2 + // if size of extents goes to 1 + // complying with GNU Octave + // if position 2 contains 1 we + // swap the pos + if( size < size_type(2) && e[1] != value_type(1) ){ + std::swap(n_extents[0], n_extents[1]); + } + + return extents_type(n_extents); + } + + template + inline + auto squeeze_impl( basic_fixed_rank_extents const& e ){ + if constexpr( N <= 2 ){ + return e; + }else{ + return squeeze_impl(basic_extents(e)); + } + } + + + +} // namespace boost::numeric::ublas::detail + 
+namespace boost::numeric::ublas { + +/** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ +template +[[nodiscard]] inline +constexpr bool valid(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::valid() : invalid type, type should be an extents"); + + auto greater_than_zero = [](auto const& a){ return a > 0u; }; + + if( e.size() == 1u ) return e[0] == 1u; + return !e.empty() && std::all_of(e.begin(), e.end(), greater_than_zero ); +} + +/** + * @code static_extents<4,1,2,3,4> s; + * std::cout< +[[nodiscard]] inline +std::string to_string(T const &e) { + + using value_type = typename T::value_type; + + static_assert(is_extents_v ||is_strides_v, + "boost::numeric::ublas::to_string() : invalid type, type should be an extents or a strides"); + + if ( e.empty() ) return "[]"; + + std::stringstream ss; + + ss << "[ "; + + std::copy( e.begin(), e.end() - 1, std::ostream_iterator(ss,", ") ); + + ss << e.back() << " ]"; + + return ss.str(); +} + +/** @brief Returns true if this has a scalar shape + * + * @returns true if (1,1,[1,...,1]) + */ +template +[[nodiscard]] inline +constexpr bool is_scalar(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_scalar() : invalid type, type should be an extents"); + + auto equal_one = [](auto const &a) { return a == 1u; }; + + return !e.empty() && std::all_of(e.begin(), e.end(), equal_one); +} + +/** @brief Returns true if this has a vector shape + * + * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 + */ +template +[[nodiscard]] inline +constexpr bool is_vector(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_vector() : invalid type, type should be an extents"); + + auto greater_one = [](auto const &a) { return a > 1u; }; + auto equal_one = [](auto const &a) { return a == 1u; }; + + if (e.size() == 0u) return false; + else if (e.size() == 1u) return e[0] > 1u; + else return 
std::any_of(e.begin(), e.begin() + 2, greater_one) && + std::any_of(e.begin(), e.begin() + 2, equal_one) && + std::all_of(e.begin() + 2, e.end(), equal_one); + +} + +/** @brief Returns true if this has a matrix shape + * + * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + */ +template +[[nodiscard]] inline +constexpr bool is_matrix(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_matrix() : invalid type, type should be an extents"); + + auto greater_one = [](auto const &a) { return a > 1u; }; + auto equal_one = [](auto const &a) { return a == 1u; }; + + return ( e.size() >= 2u ) && std::all_of(e.begin(), e.begin() + 2, greater_one) && + std::all_of(e.begin() + 2, e.end(), equal_one); +} + + +/** @brief Returns true if this is has a tensor shape + * + * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() + */ +template +[[nodiscard]] inline +constexpr bool is_tensor(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::is_tensor() : invalid type, type should be an extents"); + + auto greater_one = [](auto const &a) { return a > 1u;}; + + return ( e.size() >= 3u ) && std::any_of(e.begin() + 2, e.end(), greater_one); +} + +/** @brief Eliminates singleton dimensions when size > 2 + * + * squeeze { 1,1} -> { 1,1} + * squeeze { 2,1} -> { 2,1} + * squeeze { 1,2} -> { 1,2} + * + * squeeze {1,2,3} -> { 2,3} + * squeeze {2,1,3} -> { 2,3} + * squeeze {1,3,1} -> { 1,3} + * + * @returns basic_extents with squeezed extents + */ +template +[[nodiscard]] inline +auto squeeze(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::squeeze() : invalid type, type should be an extents"); + + return detail::squeeze_impl(e); +} + +/** @brief Returns the product of extents */ +template +[[nodiscard]] inline +constexpr auto product(ExtentsType const &e) { + + static_assert(is_extents_v, "boost::numeric::ublas::product() : invalid type, type should be an extents"); + + if ( 
e.empty() ) return 0u; + else return std::accumulate(e.begin(), e.end(), 1u, std::multiplies<>()) ; +} + + +template && is_extents_v + , int> = 0 +> +[[nodiscard]] inline +constexpr bool operator==(LExtents const& lhs, RExtents const& rhs) noexcept{ + + static_assert( std::is_same_v, + "boost::numeric::ublas::operator==(LExtents, RExtents) : LHS value type should be same as RHS value type"); + + return ( lhs.size() == rhs.size() ) && std::equal(lhs.begin(), lhs.end(), rhs.begin()); +} + +template && is_extents_v + , int> = 0 +> +[[nodiscard]] inline +constexpr bool operator!=(LExtents const& lhs, RExtents const& rhs) noexcept{ + + static_assert( std::is_same_v, + "boost::numeric::ublas::operator!=(LExtents, RExtents) : LHS value type should be same as RHS value type"); + + return !( lhs == rhs ); +} + +} // namespace boost::numeric::ublas + +#endif diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp new file mode 100644 index 000000000..cdc1840f4 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp @@ -0,0 +1,200 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// +/// \file strides.hpp Definition for the basic_strides template class + + +#ifndef _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ +#define _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ + +#include +#include +#include + +namespace boost { +namespace numeric { +namespace ublas { + +using first_order = column_major; +using last_order = row_major; + +template +class basic_extents; + + +/** @brief Template class for storing tensor strides for iteration with runtime variable size. + * + * Proxy template class of std::vector. + * + */ +template +class basic_strides +{ +public: + + using base_type = std::vector<__int_type>; + + static_assert( std::numeric_limits::is_integer, + "Static error in boost::numeric::ublas::basic_strides: type must be of type integer."); + static_assert(!std::numeric_limits::is_signed, + "Static error in boost::numeric::ublas::basic_strides: type must be of type unsigned integer."); + static_assert(std::is_same<__layout,first_order>::value || std::is_same<__layout,last_order>::value, + "Static error in boost::numeric::ublas::basic_strides: layout type must either first or last order"); + + + using layout_type = __layout; + using value_type = typename base_type::value_type; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using size_type = typename base_type::size_type; + using const_pointer = typename base_type::const_pointer; + using const_iterator = typename base_type::const_iterator; + + + /** @brief Default constructs basic_strides + * + * @code auto ex = basic_strides{}; + */ + constexpr explicit basic_strides() + : _base{} + { + } + + /** @brief Constructs basic_strides from basic_extents for the first- and last-order storage formats + * + * @code auto strides = basic_strides( 
basic_extents{2,3,4} ); + * + */ + template + basic_strides(basic_extents const& s) + : _base(s.size(),1) + { + if( s.empty() ) + return; + + if( !valid(s) ) + throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : shape is not valid."); + + if( is_vector(s) || is_scalar(s) ) /* */ + return; + + if( this->size() < 2 ) + throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : size of strides must be greater or equal 2."); + + + if constexpr (std::is_same::value){ + assert(this->size() >= 2u); + size_type k = 1ul, kend = this->size(); + for(; k < kend; ++k) + _base[k] = _base[k-1] * s[k-1]; + } + else { + assert(this->size() >= 2u); + size_type k = this->size()-2, kend = 0ul; + for(; k > kend; --k) + _base[k] = _base[k+1] * s[k+1]; + _base[0] = _base[1] * s[1]; + } + } + + basic_strides(basic_strides const& l) + : _base(l._base) + {} + + basic_strides(basic_strides && l ) + : _base(std::move(l._base)) + {} + + basic_strides(base_type const& l ) + : _base(l) + {} + + basic_strides(base_type && l ) + : _base(std::move(l)) + {} + + ~basic_strides() = default; + + + basic_strides& operator=(basic_strides other) + { + swap (*this, other); + return *this; + } + + friend void swap(basic_strides& lhs, basic_strides& rhs) { + std::swap(lhs._base , rhs._base); + } + + [[nodiscard]] inline + constexpr const_reference operator[] (size_type p) const{ + return _base[p]; + } + + [[nodiscard]] inline + constexpr const_pointer data() const{ + return _base.data(); + } + + [[nodiscard]] inline + constexpr const_reference at (size_type p) const{ + return _base.at(p); + } + + [[nodiscard]] inline + constexpr const_reference back () const{ + return _base[_base.size() - 1]; + } + + [[nodiscard]] inline + constexpr reference back (){ + return _base[_base.size() - 1]; + } + + [[nodiscard]] inline + constexpr bool empty() const{ + return _base.empty(); + } + + [[nodiscard]] inline + constexpr size_type size() const{ + return _base.size(); + } + + 
[[nodiscard]] inline + constexpr const_iterator begin() const{ + return _base.begin(); + } + + [[nodiscard]] inline + constexpr const_iterator end() const{ + return _base.end(); + } + + inline + constexpr void clear() { + this->_base.clear(); + } + + [[nodiscard]] inline + constexpr base_type const& base() const{ + return this->_base; + } + +protected: + base_type _base; +}; + +} +} +} + +#endif diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 723f5b11a..9ee07cd49 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,8 +32,12 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run test_algorithms.cpp +<<<<<<< HEAD + [ run test_access.cpp + test_algorithms.cpp test_einstein_notation.cpp + test_subtensor.cpp + test_subtensor_utility.cpp test_expression.cpp test_expression_evaluation.cpp test_extents_dynamic.cpp @@ -72,3 +76,4 @@ test-suite boost-ublas-tensor-test # ] ; + diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp new file mode 100644 index 000000000..559482ba0 --- /dev/null +++ b/test/tensor/test_access.cpp @@ -0,0 +1,305 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#include +#include + +#include +#include +#include + +#include +#include + + +BOOST_AUTO_TEST_SUITE ( test_access_suite ) + + +using layout_types = std::tuple; + +//zip>::with_t; + +struct fixture { + using extents_t = boost::numeric::ublas::dynamic_extents<>; + using value_t = typename extents_t::value_type; + using multi_index_t = std::vector; + using index_t = value_t; + + fixture() + { + static_assert(shapes.size() == multi_index.size(),""); + static_assert(shapes.size() == indexf.size(),""); + static_assert(shapes.size() == indexl.size(),""); + static_assert(shapes.size() == ranks.size(),""); + + for(auto k = 0u; k < multi_index.size(); ++k){ + auto const& n = shapes[k]; + auto const r = ranks[k]; + assert( n.size() == r ); + for (auto const& i : multi_index[k]){ + assert( std::equal(i.begin(), i.end(), n.begin(), std::less<>{}) ) ; + } + } + } + + + static inline auto shapes = std::array + {{ + { }, + {1,1 }, + + {1,2 }, + {2,1 }, + {2,3 }, + + {2,3,1 }, + {1,2,3 }, + {3,1,2 }, + {3,2,4 }, + + {2,3,4,1}, + {1,2,3,4}, + {3,1,2,4}, + {3,2,4,5} + }}; + + static constexpr inline auto ranks = std::array + { 0,2,2,2,2,3,3,3,3,4,4,4,4 }; + + static inline auto multi_index = std::array,shapes.size()> + {{ + {{ { }, { }, { } }}, // 0 {} + {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} + + {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} + {{ {0,0 }, {1,0 }, {1,0 } }}, // 3 {2,1} + {{ {0,0 }, {1,1 }, {1,2 } }}, // 4 {2,3} + + {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 5 {2,3,1} + {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 6 {1,2,3} + {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 7 {3,1,2} + {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, // 8 {3,2,4} + + {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, // 9 {2,3,4,1} + {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //10 {1,2,3,4} + {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} 
}}, //11 {3,1,2,4} + {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //12 {3,2,4,5} + }}; + + static constexpr inline auto indexf = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 {1,2} + {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 3, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} + {{0, 3, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} + {{0, 3, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} + {{0, 4, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} + {{0,16, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} + {{0,15, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} + {{0,15, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} + {{0,16, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} + {{0,88,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} + }}; + + static constexpr inline auto indexl = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 {1,2} + {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1 }, {1,1} + {{0, 4, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} + {{0, 4, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} + {{0, 4, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} + {{0, 3, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} + {{0,14, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} + {{0,18, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} + {{0,18, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} + {{0,14, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} + {{0,73,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} + }}; + + template + constexpr inline auto prodn(extents_type const& n) + { + return 
std::accumulate(n.begin(),n.end(),1ul, std::multiplies<>{}); + } + + // static constexpr inline auto const& e = shapes; + // static constexpr inline auto const& i = multi_indices; + + + // template struct x { static inline constexpr auto value = e[k][r]*x::value; }; + // template struct x { static inline constexpr auto value = 1; }; + // template struct x { static inline constexpr auto value = 1*x::value; }; + + // template struct y { static inline constexpr auto value = e[k][r ]*y::value; }; + // template struct y { static inline constexpr auto value = 1*y::value; }; + // template struct y { static inline constexpr auto value = e[k][p-1]; }; + + + // template static inline constexpr auto wf = x::value; + // template static inline constexpr auto wl = y::value; + + // template struct zf { static inline constexpr auto value = i[k][kk][r]*wf + zf::value; }; + // template struct zf<0,k,kk> { static inline constexpr auto value = i[k][kk][0]*wf; }; + + // template static inline constexpr auto c2 = zf<2,k,kk>::value; + // template static inline constexpr auto c3 = zf<3,k,kk>::value; + // template static inline constexpr auto c4 = zf<4,k,kk>::value; + + + +}; + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index, layout_t, layout_types, fixture ) +{ + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + using strides_t = ub::basic_strides; + + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); + }); + }); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_t, layout_types, fixture ) +{ + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + using strides_t = ub::basic_strides; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + constexpr auto r = std::get(ranks); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); + }); + }); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_types, fixture ) +{ + using namespace boost::numeric::ublas; + using strides_t = basic_strides; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + for(auto k = 0u; k < index.size(); ++k){ + auto const& n = shapes[k]; + auto const& iref = multi_index[k]; + auto const& w = strides_t(n); + auto const& jref = index[k]; + for(auto kk = 0u; kk < iref.size(); ++kk){ + auto const jj = jref[kk]; + auto const& ii = iref[kk]; + auto i = multi_index_t(w.size()); + //detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); + if constexpr ( is_first_order ) + detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); + else + detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); + + std::cout << "j= " << jj << std::endl; + std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "ii_= [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; + std::cout << std::endl; + + + + BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; + } + } +} + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t, layout_types, fixture ) +{ + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + using strides_t = ub::basic_strides; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& iref = std::get(multi_index); + auto const& jref = std::get(index); + auto const& w = strides_t(n); + constexpr auto r = std::get(ranks); + mp::mp_for_each>( [&]( auto K ) { + auto const jj = std::get(jref); + auto const& ii = std::get(iref); + auto i = multi_index_t(w.size()); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); + BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; + }); + }); +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_subtensor, layout_t, layout_types, fixture ) +{ + using namespace boost::numeric::ublas; + using strides_t = basic_strides; + + // subtensor the whole index-domain of a tensor + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? indexf : indexl; + + // subtensor the whole index-domain of a tensor + for(auto k = 0u; k < index.size(); ++k){ + auto const& n = shapes[k]; + auto const& w = strides_t(n); + auto const& jref = index[k]; + for(auto kk = 0u; kk < jref.size(); ++kk){ + auto const jj = jref[kk]; + auto const j = detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); + BOOST_CHECK_EQUAL ( j, jj ) ; + } + } +} + + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index e8c5aed19..2c4cace00 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -142,7 +142,7 @@ BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) struct fixture_shape { using shape = boost::numeric::ublas::basic_extents; - fixture_shape() : extents{ + fixture_shape() : extents{ shape{}, // 0 shape{1,1}, // 1 shape{1,2}, // 2 @@ -153,15 +153,15 @@ struct fixture_shape { shape{1,2,3}, // 7 shape{4,2,3}, // 8 shape{4,2,3,5} // 9 - } + } {} - std::vector extents; + std::vector extents; }; BOOST_FIXTURE_TEST_CASE( 
generate_span_array_test, fixture_shape ) { using namespace boost::numeric::ublas; - using span = sliced_span; + using span = sliced_span; // shape{} { From 03e7fd6ee301a018654218aa747eaa005a5a325a Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 6 Jul 2020 08:24:39 +0200 Subject: [PATCH 05/40] stride creation changed. --- include/boost/numeric/ublas/tensor/access.hpp | 119 +++++--- .../ublas/tensor/detail/extents_functions.hpp | 278 +++++++++++------- .../numeric/ublas/tensor/dynamic_strides.hpp | 39 +-- .../numeric/ublas/tensor/multiplication.hpp | 6 - test/tensor/test_access.cpp | 106 ++++--- 5 files changed, 331 insertions(+), 217 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index e83e588c9..108c600cc 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -18,6 +18,16 @@ #include #include +#include + + +namespace boost::numeric::ublas { + +using first_order = column_major; +using last_order = row_major; + +} + namespace boost::numeric::ublas::detail{ @@ -57,65 +67,64 @@ constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 * @param wp end input iterator to a container with tensor or subtensor strides of length std::distance(begin,end) * @param i begin output iterator to a container with tensor or subtensor indices length std::distance(begin,end) or greater */ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, LayoutType l); + + template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, first_order ) { if(w == wp) return; - auto p = std::distance(w,wp); - auto kq = j; - //auto q = 0ul; + auto wr = std::make_reverse_iterator( w ); + auto wrp = std::make_reverse_iterator( wp ); + auto ir = 
std::make_reverse_iterator( i+std::distance(w,wp) ); - - for(int r = p-1; r >= 0; --r) - { - //q = l[r]-1; - i[r] = kq/w[r]; - kq -= w[r]*i[r]; - } - - //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); + std::transform(wrp,wr,ir, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); } - template -constexpr inline void compute_multi_index_first(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, last_order ) { if(w == wp) return; - auto p = std::distance(w,wp); - auto kq = j; - //auto q = 0ul; + std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +} -// for(int r = p-1; r >= 0; --r) -// { -// //q = l[r]-1; -// i[r] = kq/w[r]; -// kq -= w[r]*i[r]; -// } - std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); -} -template -constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) -{ - if(w == wp) - return; - auto p = std::distance(w,wp); - auto kq = j; +//template +//constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +//{ +// if(w == wp) +// return; +//// for(unsigned r = 0ul; r < p; ++r) { +//// i[r] = kq/w[r]; +//// kq -= w[r]*i[r]; +//// } +// std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +//} - for(unsigned r = 0ul; r < p; ++r) { - i[r] = kq/w[r]; - kq -= w[r]*i[r]; - } +//template +//constexpr inline void compute_multi_index_first(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i) +//{ +// if(w == wp) +// return; - //std::transform(w,wp,i, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); -} +//// for(int r = p-1; r >= 0; --r) { +//// i[r] = kq/w[r]; +//// kq -= w[r]*i[r]; +//// } +// auto wr = std::make_reverse_iterator( w ); +// auto wrp = std::make_reverse_iterator( wp ); +// auto ir = std::make_reverse_iterator( i+std::distance(w,wp) ); + +// std::transform(wrp,wr,ir, [&j](auto v) { auto 
k=j/v; j-=v*k; return k; } ); +//} /** @brief Computes a single index from a multi-index of a dense tensor or subtensor @@ -124,15 +133,39 @@ constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputI * @param w begin input iterator to a container with strides of length p * @param i begin input iterator to a container with indices of length p or greater */ +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i, LayoutType); + + template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i, first_order o) { - if constexpr (p==0u) return; - else if constexpr (p >1u) {i[p-1]=j/w[p-1]; compute_multi_index(j-w[p-1]*i[p-1],w,w,i); } - else {i[p-1]=j/w[p-1]; } + if constexpr (p==0u) return; + else if constexpr (p >1u) {i[p-1]=j/w[p-1]; compute_multi_index(j-w[p-1]*i[p-1],w,w,i,o); } + else {i[p-1]=j/w[p-1]; } } + +template +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i, last_order o) +{ + if constexpr (p == 0u ) { return; } + else if constexpr (k+1 == p) {i[k]=j/w[k]; } + else {i[k]=j/w[k]; compute_multi_index(j-w[k]*i[k],w,w,i,o); } +} + + + +//template +//constexpr inline void compute_multi_index_last(std::size_t j, InputIt1 w, InputIt1 /*wp*/, OutputIt i) +//{ +// if constexpr (p == 0u ) return; +// else if constexpr (k+1 == p) {i[k]=j/w[k]; } +// else {i[k]=j/w[k]; compute_multi_index_last(j-w[k]*i[k],w,w,i); } +//} + + /** @brief Computes a single (relative memory) index of a dense tensor from a single index of one of its subtensor * * @param jv single index of a subtensor that is transformed into a single index of a dense tensor diff --git a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp index 4273829d1..479555e57 
100644 --- a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp @@ -23,43 +23,43 @@ namespace boost::numeric::ublas::detail{ - template - constexpr auto push_back(basic_static_extents) -> basic_static_extents; - - template - constexpr auto push_front(basic_static_extents) -> basic_static_extents; - - template - constexpr auto squeeze_impl_remove_one( basic_static_extents, basic_static_extents num = basic_static_extents{} ){ - // executed when basic_static_extents is size of 1 - // @code basic_static_extents @endcode - if constexpr( sizeof...(E) == 0ul ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - if constexpr( E0 == T(1) ){ - return num; - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - return decltype(push_back(num)){}; - } - }else{ - if constexpr( E0 == T(1) ){ - // if element E0 is 1 we return number list but we do not append - // it to the list - return squeeze_impl_remove_one(basic_static_extents{}, num); - }else{ - // if element E0 is 1 we return number list but we append - // it to the list - auto n_num_list = decltype(push_back(num)){}; - return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); - } - } +template +constexpr auto push_back(basic_static_extents) -> basic_static_extents; + +template +constexpr auto push_front(basic_static_extents) -> basic_static_extents; + +template +constexpr auto squeeze_impl_remove_one( basic_static_extents, basic_static_extents num = basic_static_extents{} ){ +// executed when basic_static_extents is size of 1 +// @code basic_static_extents @endcode +if constexpr( sizeof...(E) == 0ul ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + if constexpr( E0 == T(1) ){ + return num; + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + return decltype(push_back(num)){}; + } 
+}else{ + if constexpr( E0 == T(1) ){ + // if element E0 is 1 we return number list but we do not append + // it to the list + return squeeze_impl_remove_one(basic_static_extents{}, num); + }else{ + // if element E0 is 1 we return number list but we append + // it to the list + auto n_num_list = decltype(push_back(num)){}; + return squeeze_impl_remove_one(basic_static_extents{}, n_num_list); } +} +} + +template +constexpr auto squeeze_impl( basic_static_extents const& e ){ - template - constexpr auto squeeze_impl( basic_static_extents const& e ){ - using extents_type = basic_static_extents; if constexpr( extents_type::_size <= typename extents_type::size_type(2) ){ @@ -75,7 +75,7 @@ namespace boost::numeric::ublas::detail{ // if same that means 1s does not exist and no need to // squeeze if constexpr( decltype(one_free_static_extents)::_size != extents_type::_size ){ - + // after squeezing, all the extents are 1s we need to // return extents of (1, 1) if constexpr( decltype(one_free_static_extents)::_size == size_type(0) ){ @@ -99,17 +99,17 @@ namespace boost::numeric::ublas::detail{ }else{ return e; } - - } +} - template - inline - constexpr auto squeeze_impl( basic_extents const& e ){ +template +[[nodiscard]] inline constexpr + auto squeeze_impl( basic_extents const& e ) +{ using extents_type = basic_extents; using base_type = typename extents_type::base_type; using value_type = typename extents_type::value_type; using size_type = typename extents_type::size_type; - + if( e.size() <= size_type(2) ){ return e; } @@ -120,36 +120,36 @@ namespace boost::numeric::ublas::detail{ // count non one values size_type size = std::count_if(e.begin(), e.end(), not_one); - + // reserve space base_type n_extents( std::max(size, size_type(2)), 1 ); - + // copying non 1s to the new extents std::copy_if(e.begin(), e.end(), n_extents.begin(), not_one); // checking if extents size goes blow 2 // if size of extents goes to 1 // complying with GNU Octave - // if position 2 contains 1 
we + // if position 2 contains 1 we // swap the pos if( size < size_type(2) && e[1] != value_type(1) ){ std::swap(n_extents[0], n_extents[1]); } - - return extents_type(n_extents); - } - template - inline - auto squeeze_impl( basic_fixed_rank_extents const& e ){ - if constexpr( N <= 2 ){ - return e; - }else{ - return squeeze_impl(basic_extents(e)); + return extents_type(n_extents); +} + +template +[[nodiscard]] inline constexpr + auto squeeze_impl( basic_fixed_rank_extents const& e ) +{ + if constexpr( N <= 2u ){ + return e; } - } - - + else{ + return squeeze_impl(basic_extents(e)); + } +} } // namespace boost::numeric::ublas::detail @@ -157,14 +157,19 @@ namespace boost::numeric::ublas { /** @brief Returns true if size > 1 and all elements > 0 or size == 1 && e[0] == 1 */ template -[[nodiscard]] inline -constexpr bool valid(ExtentsType const &e) { +[[nodiscard]] inline constexpr + bool valid(ExtentsType const &e) +{ - static_assert(is_extents_v, "boost::numeric::ublas::valid() : invalid type, type should be an extents"); + static_assert(is_extents_v, + "boost::numeric::ublas::valid() : " + "invalid type, type should be an extents"); - auto greater_than_zero = [](auto const& a){ return a > 0u; }; + auto greater_than_zero = [](auto a){ return a > 0u; }; - if( e.size() == 1u ) return e[0] == 1u; + if( e.size() == 1u ) { + return e[0] == 1u; + } return !e.empty() && std::all_of(e.begin(), e.end(), greater_than_zero ); } @@ -197,73 +202,143 @@ std::string to_string(T const &e) { return ss.str(); } + + +/** @brief Returns true if this has a matrix shape + * + * @returns true if (1,1,[1,...,1]) + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_scalar(InputIt first, InputIt last) +{ + return std::distance(first,last)>0u && + std::all_of (first,last,[](auto a){return a==1u;}); +} + + /** @brief Returns true if this has a 
scalar shape * * @returns true if (1,1,[1,...,1]) */ template -[[nodiscard]] inline -constexpr bool is_scalar(ExtentsType const &e) { +[[nodiscard]] inline constexpr + bool is_scalar(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_scalar() : " + "invalid type, type should be an extents"); + return is_scalar(e.begin(),e.end()); +} - static_assert(is_extents_v, "boost::numeric::ublas::is_scalar() : invalid type, type should be an extents"); - - auto equal_one = [](auto const &a) { return a == 1u; }; - return !e.empty() && std::all_of(e.begin(), e.end(), equal_one); + +/** @brief Returns true if this has a matrix shape + * + * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_vector(InputIt first, InputIt last) +{ + if (std::distance(first,last) == 0u) return false; + if (std::distance(first,last) == 1u) return *first > 1u; + + return std::any_of(first ,first+2, [](auto a){return a >1u;}) && + std::any_of(first ,first+2, [](auto a){return a==1u;}) && + std::all_of(first+2,last , [](auto a){return a==1u;}); } + /** @brief Returns true if this has a vector shape * * @returns true if (1,n,[1,...,1]) or (n,1,[1,...,1]) with n > 1 + * + * @param e extents with boost::numeric::ublas::is_valid(e) + * and supporting e.begin() and e.end() */ template -[[nodiscard]] inline -constexpr bool is_vector(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_vector() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; +[[nodiscard]] inline constexpr + bool is_vector(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_vector() : " + "invalid type, type should be an extents"); + 
+ return is_vector(e.begin(),e.end()); +} - if (e.size() == 0u) return false; - else if (e.size() == 1u) return e[0] > 1u; - else return std::any_of(e.begin(), e.begin() + 2, greater_one) && - std::any_of(e.begin(), e.begin() + 2, equal_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); +/** @brief Returns true if this has a matrix shape + * + * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ +template +[[nodiscard]] inline constexpr + bool is_matrix(InputIt first, InputIt last) +{ + return std::distance(first,last)>=2u && + std::all_of(first , first+2, [](auto a){return a >1u;}) && + std::all_of(first+2, last , [](auto a){return a==1u;}); } /** @brief Returns true if this has a matrix shape * * @returns true if (m,n,[1,...,1]) with m > 1 and n > 1 + * + * @param e extents with boost::numeric::ublas::is_valid(e) + * and supporting e.begin() and e.end() */ template -[[nodiscard]] inline -constexpr bool is_matrix(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_matrix() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u; }; - auto equal_one = [](auto const &a) { return a == 1u; }; +[[nodiscard]] inline constexpr + bool is_matrix(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_matrix() : " + "invalid type, type should be an extents"); + return is_matrix(e.begin(),e.end()); +} - return ( e.size() >= 2u ) && std::all_of(e.begin(), e.begin() + 2, greater_one) && - std::all_of(e.begin() + 2, e.end(), equal_one); +/** @brief Returns true if this is has a tensor shape + * + * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() + * + * @param first input iterator pointing to the start of a shape object + * @param last input iterator pointing to the end of a shape object + */ 
+template +[[nodiscard]] inline constexpr + bool is_tensor(InputIt first, InputIt last) +{ + return std::distance(first,last)>=3u && + std::any_of(first+2, last, [](auto a){return a>1u;}); } /** @brief Returns true if this is has a tensor shape * * @returns true if !empty() && !is_scalar() && !is_vector() && !is_matrix() + * + * @param e extents with boost::numeric::ublas::is_valid(e) + * supporting e.begin() and e.end() */ template -[[nodiscard]] inline -constexpr bool is_tensor(ExtentsType const &e) { - - static_assert(is_extents_v, "boost::numeric::ublas::is_tensor() : invalid type, type should be an extents"); - - auto greater_one = [](auto const &a) { return a > 1u;}; - - return ( e.size() >= 3u ) && std::any_of(e.begin() + 2, e.end(), greater_one); +[[nodiscard]] inline constexpr + bool is_tensor(ExtentsType const &e) +{ + static_assert(is_extents_v, + "boost::numeric::ublas::is_tensor() : " + "invalid type, type should be an extents"); + return is_tensor(e.begin(),e.end()); } /** @brief Eliminates singleton dimensions when size > 2 @@ -280,11 +355,12 @@ constexpr bool is_tensor(ExtentsType const &e) { */ template [[nodiscard]] inline -auto squeeze(ExtentsType const &e) { - + auto squeeze(ExtentsType const &e) +{ + static_assert(is_extents_v, "boost::numeric::ublas::squeeze() : invalid type, type should be an extents"); - return detail::squeeze_impl(e); + return detail::squeeze_impl(e); } /** @brief Returns the product of extents */ diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp index cdc1840f4..45001f179 100644 --- a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp +++ b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp @@ -74,35 +74,38 @@ class basic_strides * */ template - basic_strides(basic_extents const& s) - : _base(s.size(),1) + basic_strides(basic_extents const& n) + : _base(n.size(),1) { - if( s.empty() ) + if( n.empty() ) return; - if( !valid(s) ) + if( 
!valid(n) ) throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : shape is not valid."); - if( is_vector(s) || is_scalar(s) ) /* */ - return; +// if( is_vector(s) || is_scalar(s) ) /* */ +// return; + + const auto p = this->size(); - if( this->size() < 2 ) + if( p < 2 ) throw std::runtime_error("Error in boost::numeric::ublas::basic_strides() : size of strides must be greater or equal 2."); + assert(p >= 2u); + auto& w = _base; - if constexpr (std::is_same::value){ - assert(this->size() >= 2u); - size_type k = 1ul, kend = this->size(); - for(; k < kend; ++k) - _base[k] = _base[k-1] * s[k-1]; + auto q = base_type(_base.size()); + if( std::is_same_v){ + std::iota(q.begin(), q.end(), 0u); } - else { - assert(this->size() >= 2u); - size_type k = this->size()-2, kend = 0ul; - for(; k > kend; --k) - _base[k] = _base[k+1] * s[k+1]; - _base[0] = _base[1] * s[1]; + else{ + std::iota(q.rbegin(), q.rend(), 0u); } + + w[ q[0] ] = 1u; + for(auto k = 1u; k < p; ++k) + w[ q[k] ] = w[ q[k-1] ] * n [ q[k-1] ]; + } basic_strides(basic_strides const& l) diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index 6a9c0613b..e2d94f7be 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -645,12 +645,6 @@ void ttv(SizeType const m, SizeType const p, } } - for(auto i = m; i < p; ++i){ - if(na[i] != nc[i-1]){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - } - } - const auto max = std::max(nb[0], nb[1]); if( na[m-1] != max){ throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index 559482ba0..57ea432f9 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -51,13 +51,15 @@ struct fixture 
{ } - static inline auto shapes = std::array + static inline auto shapes = std::array {{ { }, {1,1 }, {1,2 }, + {1,4 }, {2,1 }, + {4,1 }, {2,3 }, {2,3,1 }, @@ -72,7 +74,7 @@ struct fixture { }}; static constexpr inline auto ranks = std::array - { 0,2,2,2,2,3,3,3,3,4,4,4,4 }; + { 0,2,2,2,2,2,2,3,3,3,3,4,4,4,4 }; static inline auto multi_index = std::array,shapes.size()> {{ @@ -80,52 +82,58 @@ struct fixture { {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} - {{ {0,0 }, {1,0 }, {1,0 } }}, // 3 {2,1} - {{ {0,0 }, {1,1 }, {1,2 } }}, // 4 {2,3} - - {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 5 {2,3,1} - {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 6 {1,2,3} - {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 7 {3,1,2} - {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, // 8 {3,2,4} - - {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, // 9 {2,3,4,1} - {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //10 {1,2,3,4} - {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //11 {3,1,2,4} - {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //12 {3,2,4,5} + {{ {0,0 }, {0,2 }, {0,3 } }}, // 3 {1,4} + {{ {0,0 }, {1,0 }, {1,0 } }}, // 4 {2,1} + {{ {0,0 }, {2,0 }, {3,0 } }}, // 5 {4,1} + {{ {0,0 }, {1,1 }, {1,2 } }}, // 6 {2,3} + + {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 7 {2,3,1} + {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 8 {1,2,3} + {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 9 {3,1,2} + {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, //10 {3,2,4} + + {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, //11 {2,3,4,1} + {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //12 {1,2,3,4} + {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //13 {3,1,2,4} + {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //14 {3,2,4,5} }}; static constexpr inline auto indexf = std::array,shapes.size()> {{ - {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 0 {} {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 {1,2} - {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} - {{0, 3, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} - {{0, 3, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} - {{0, 3, 5}}, // 6 { {0,0,0 }, 
{0,1,1 }, {0,1,2 } }, // 6 {1,2,3} - {{0, 4, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} - {{0,16, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} - {{0,15, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} - {{0,15, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} - {{0,16, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} - {{0,88,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 3, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} + {{0, 3, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} + {{0, 3, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} + {{0, 4, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} + {{0,16, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} + {{0,15, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} + {{0,15, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} + {{0,16, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} + {{0,88,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} }}; static constexpr inline auto indexl = std::array,shapes.size()> {{ {{0, 0, 0}}, // 0 {} {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 {1,2} - {{0, 1, 1}}, // 3 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1 }, {1,1} - {{0, 4, 5}}, // 4 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} - {{0, 4, 5}}, // 5 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} - {{0, 4, 5}}, // 6 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} - {{0, 3, 5}}, // 7 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} - {{0,14, 23}}, // 8 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 
{3,2,4 }, {8,4,1} - {{0,18, 23}}, // 9 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} - {{0,18, 23}}, // 10 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} - {{0,14, 23}}, // 11 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} - {{0,73,119}}, // 12 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 4, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} + {{0, 4, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} + {{0, 4, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} + {{0, 3, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} + {{0,14, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} + {{0,18, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} + {{0,18, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} + {{0,14, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} + {{0,73,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} }}; template @@ -228,18 +236,18 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_ty auto const jj = jref[kk]; auto const& ii = iref[kk]; auto i = multi_index_t(w.size()); - //detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); - if constexpr ( is_first_order ) - detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); - else - detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); + detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); +// if constexpr ( is_first_order ) +// detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); +// else +// 
detail::compute_multi_index_last (jj, w.begin(), w.end(), i.begin()); - std::cout << "j= " << jj << std::endl; - std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << "ii_= [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; - std::cout << std::endl; +// std::cout << "j= " << jj << std::endl; +// std::cout << "i= [ "; for(auto iii : i) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "ii_ref = [ "; for(auto iii : ii) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "n= [ "; for(auto iii : n) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << "w= [ "; for(auto iii : w) std::cout << iii << " "; std::cout << "];" << std::endl; +// std::cout << std::endl; @@ -268,7 +276,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t auto const jj = std::get(jref); auto const& ii = std::get(iref); auto i = multi_index_t(w.size()); - ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin()); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); BOOST_CHECK ( std::equal(i.begin(),i.end(),ii.begin()) ) ; }); }); From 4bbbcc498b5021c313016c9858902e302dff3034 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Mon, 24 May 2021 22:13:55 +0200 Subject: [PATCH 06/40] qtcreator pri file adjusted. 
--- IDEs/qtcreator/include/tensor/tensor.pri | 1 + 1 file changed, 1 insertion(+) diff --git a/IDEs/qtcreator/include/tensor/tensor.pri b/IDEs/qtcreator/include/tensor/tensor.pri index 3dc0dc2f0..10ce1d42e 100644 --- a/IDEs/qtcreator/include/tensor/tensor.pri +++ b/IDEs/qtcreator/include/tensor/tensor.pri @@ -1,4 +1,5 @@ HEADERS += \ + $${INCLUDE_DIR}/boost/numeric/ublas/tensor/access.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/algorithms.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression.hpp \ $${INCLUDE_DIR}/boost/numeric/ublas/tensor/expression_evaluation.hpp \ From 28490869b96eb41822c4bf3dd27a08dc1048b2f4 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Fri, 28 May 2021 15:09:58 +0200 Subject: [PATCH 07/40] fixing tesor-vector multiplication for subtensor. --- IDEs/qtcreator/test/test_tensor.pro | 4 +- include/boost/numeric/ublas/tensor/access.hpp | 26 +- .../boost/numeric/ublas/tensor/concepts.hpp | 1 + .../tensor/extents/extents_functions.hpp | 11 +- .../tensor/function/tensor_times_vector.hpp | 3 +- include/boost/numeric/ublas/tensor/span.hpp | 27 +- .../boost/numeric/ublas/tensor/subtensor.hpp | 209 +++++----- .../ublas/tensor/subtensor_utility.hpp | 78 ++-- include/boost/numeric/ublas/tensor/tags.hpp | 28 +- test/tensor/test_access.cpp | 371 +++++++++--------- test/tensor/test_algorithms.cpp | 4 - test/tensor/test_functions.cpp | 8 + test/tensor/test_strides.cpp | 4 +- test/tensor/test_subtensor.cpp | 259 ++++++------ test/tensor/test_subtensor_utility.cpp | 85 ++-- 15 files changed, 547 insertions(+), 571 deletions(-) diff --git a/IDEs/qtcreator/test/test_tensor.pro b/IDEs/qtcreator/test/test_tensor.pro index fffe1de57..70fcec20c 100644 --- a/IDEs/qtcreator/test/test_tensor.pro +++ b/IDEs/qtcreator/test/test_tensor.pro @@ -5,13 +5,11 @@ CONFIG += staticlib depend_includepath console CONFIG -= qt CONFIG += c++20 -CONFIG += c++17 - #QMAKE_CXXFLAGS += -fno-inline QMAKE_CXXFLAGS =-std=c++20 QMAKE_CXXFLAGS +=-Wall -Wpedantic -Wextra QMAKE_CXXFLAGS 
+=-Wno-unknown-pragmas -QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable +#QMAKE_CXXFLAGS +=-Wno-unused-but-set-variable gcc:QMAKE_CXXFLAGS_RELEASE =-O3 -march=native -fopenmp diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index 108c600cc..525282fd9 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -10,8 +10,8 @@ // -#ifndef _BOOST_UBLAS_TENSOR_ACCESS_HPP -#define _BOOST_UBLAS_TENSOR_ACCESS_HPP +#ifndef BOOST_UBLAS_TENSOR_ACCESS_HPP +#define BOOST_UBLAS_TENSOR_ACCESS_HPP #include @@ -26,7 +26,7 @@ namespace boost::numeric::ublas { using first_order = column_major; using last_order = row_major; -} +} // namespace boost::numeric::ublas namespace boost::numeric::ublas::detail{ @@ -68,11 +68,21 @@ constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 * @param i begin output iterator to a container with tensor or subtensor indices length std::distance(begin,end) or greater */ template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, LayoutType l); +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, LayoutType /*unused*/); +//{ +// if(w == wp) +// return; + +// auto wr = std::make_reverse_iterator( w ); +// auto wrp = std::make_reverse_iterator( wp ); +// auto ir = std::make_reverse_iterator( i+std::distance(w,wp) ); + +// std::transform(wrp,wr,ir, [&j](auto v) { auto k=j/v; j-=v*k; return k; } ); +//} template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, first_order ) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, 
InputIt1 wp, OutputIt i, first_order /*unused*/) { if(w == wp) return; @@ -85,7 +95,7 @@ constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp } template -constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, last_order ) +constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 wp, OutputIt i, last_order /*unused*/) { if(w == wp) return; @@ -182,6 +192,6 @@ constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 ); } -} // namespace +} // namespace boost::numeric::ublas::detail #endif diff --git a/include/boost/numeric/ublas/tensor/concepts.hpp b/include/boost/numeric/ublas/tensor/concepts.hpp index 70820484a..1a293554c 100644 --- a/include/boost/numeric/ublas/tensor/concepts.hpp +++ b/include/boost/numeric/ublas/tensor/concepts.hpp @@ -15,6 +15,7 @@ #include + namespace boost::numeric::ublas{ template diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp index 85e64ff8f..149e12bde 100644 --- a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -153,9 +153,12 @@ template { auto s = typename extents_core::base_type(e.size(),1ul); - if(empty(e) || is_vector(e) || is_scalar(e)){ + if(empty(e) || is_scalar(e)){ return s; } + + // || is_vector(e) + if constexpr(std::is_same_v){ std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); } else { @@ -170,9 +173,13 @@ template auto s = typename extents_core::base_type{}; std::fill(s.begin(),s.end(),1ul); - if(empty(e) || is_vector(e) || is_scalar(e)){ + if(empty(e) || is_scalar(e)){ return s; } + + + // || is_vector(e) + if constexpr(std::is_same_v){ std::transform(begin (e), end (e) - 1, s.begin (), s.begin ()+1, std::multiplies<>{}); } else { diff --git 
a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp index 82c9b3c41..5702f97fb 100644 --- a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp @@ -94,9 +94,8 @@ inline decltype(auto) prod( tensor_core< TE > const &a, vector const &b, c nc_base[j++] = na.at(i); auto nc = shape(nc_base); - - auto c = tensor( nc, value{} ); + auto const* bb = &(b(0)); ttv(m, p, c.data(), c.extents().data(), c.strides().data(), diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index f0875b60c..581e16dc6 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -10,26 +10,23 @@ // -#ifndef _BOOST_UBLAS_TENSOR_SPAN_ -#define _BOOST_UBLAS_TENSOR_SPAN_ +#ifndef BOOST_UBLAS_TENSOR_SPAN +#define BOOST_UBLAS_TENSOR_SPAN #include #include #include #include -namespace boost { -namespace numeric { -namespace ublas { -namespace tag { + +#include "concepts.hpp" + +namespace boost::numeric::ublas::tag{ struct sliced {}; struct strided {}; -} -} -} -} +} // namespace boost::numeric::ublas::tag namespace boost::numeric::ublas { @@ -53,7 +50,7 @@ template class span; -static constexpr inline std::size_t end = std::numeric_limits::max(); +static constexpr inline std::size_t max = std::numeric_limits::max(); template<> class span @@ -210,14 +207,14 @@ class span : using sliced_span = span; -template -inline auto ran(unsigned_type f, unsigned_type l) +template +inline auto ran(unsigned_type_left f, unsigned_type_right l) { return sliced_span(f,l); } -template -inline auto ran(unsigned_type f, unsigned_type s, unsigned_type l) +template +inline auto ran(unsigned_type_left f, unsigned_type_middle s, unsigned_type_right l) { return strided_span(f,s,l); } diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp 
b/include/boost/numeric/ublas/tensor/subtensor.hpp index 5b07e9c5f..e8a150d16 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -12,32 +12,19 @@ /// \file subtensor.hpp Definition for the tensor template class -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP_ +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP +#define BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP - - -#include -#include -#include -#include -#include +#include "tensor.hpp" +#include "subtensor_utility.hpp" +#include "extents.hpp" +#include "span.hpp" +#include "expression.hpp" namespace boost::numeric::ublas { -template -class dynamic_tensor; - -template -class matrix; - -template -class vector; - - - /** @brief A view of a dense tensor of values of type \c T. @@ -61,63 +48,63 @@ class subtensor; * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. 
\c and \c std::vector can also be used */ template -class subtensor > +class subtensor > : public detail::tensor_expression< - subtensor> , - subtensor> > + subtensor> , + subtensor> > { - static_assert( std::is_same::value || std::is_same::value, - "boost::numeric::tensor template class only supports first- or last-order storage formats."); + static_assert( std::is_same::value || std::is_same::value, + "boost::numeric::tensor template class only supports first- or last-order storage formats."); - using tensor_type = dynamic_tensor; - using self_type = subtensor; + using tensor_type = tensor_dynamic; + using self_type = subtensor; public: - using domain_tag = tag::sliced; + using domain_tag = tag::sliced; - using span_type = span; + using span_type = span; - template - using tensor_expression_type = detail::tensor_expression; + template + using tensor_expression_type = detail::tensor_expression; - template - using matrix_expression_type = matrix_expression; + template + using matrix_expression_type = matrix_expression; - template - using vector_expression_type = vector_expression; + template + using vector_expression_type = vector_expression; - using super_type = tensor_expression_type; + using super_type = tensor_expression_type; -// static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); + // static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); - using array_type = typename tensor_type::array_type; - using layout_type = typename tensor_type::layout_type; + using container_type = typename tensor_type::container_type; + using layout_type = typename tensor_type::layout_type; - using size_type = typename tensor_type::size_type; - using difference_type = typename tensor_type::difference_type; - using value_type = typename tensor_type::value_type; + using size_type = typename tensor_type::size_type; + using difference_type = typename tensor_type::difference_type; + using value_type = typename 
tensor_type::value_type; - using reference = typename tensor_type::reference; - using const_reference = typename tensor_type::const_reference; + using reference = typename tensor_type::reference; + using const_reference = typename tensor_type::const_reference; - using pointer = typename tensor_type::pointer; - using const_pointer = typename tensor_type::const_pointer; + using pointer = typename tensor_type::pointer; + using const_pointer = typename tensor_type::const_pointer; -// using iterator = typename array_type::iterator; -// using const_iterator = typename array_type::const_iterator; + // using iterator = typename array_type::iterator; + // using const_iterator = typename array_type::const_iterator; -// using reverse_iterator = typename array_type::reverse_iterator; -// using const_reverse_iterator = typename array_type::const_reverse_iterator; + // using reverse_iterator = typename array_type::reverse_iterator; + // using const_reverse_iterator = typename array_type::const_reverse_iterator; - using tensor_temporary_type = self_type; - using storage_category = dense_tag; + using tensor_temporary_type = self_type; + using storage_category = dense_tag; - using strides_type = basic_strides; - using extents_type = basic_extents; + using extents_type = extents<>; + using strides_type = typename extents_type::base_type; - using matrix_type = matrix; - using vector_type = vector; + using matrix_type = matrix; + using vector_type = vector; @@ -143,7 +130,7 @@ class subtensor > : super_type () , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) , extents_ (detail::compute_extents(spans_)) - , strides_ (extents_) + , strides_ (ublas::to_strides(extents_,layout_type{})) , span_strides_ (detail::compute_span_strides(t.strides(),spans_)) , data_ {t.data() + detail::compute_offset(t.strides(), spans_)} { @@ -381,36 +368,36 @@ class subtensor > #endif - /** @brief Returns true if the subtensor is empty (\c size==0) */ - inline bool empty () const { - 
return this->size() == 0ul; - } +// /** @brief Returns true if the subtensor is empty (\c size==0) */ +// inline bool empty () const { +// return this->size() == 0ul; +// } - /** @brief Returns the size of the subtensor */ - inline size_type size () const { - return product(this->extents_); - } +// /** @brief Returns the size of the subtensor */ +// inline size_type size () const { +// return product(this->extents_); +// } - /** @brief Returns the size of the subtensor */ - inline size_type size (size_type r) const { - return this->extents_.at(r); - } +// /** @brief Returns the size of the subtensor */ +// inline size_type size (size_type r) const { +// return this->extents_.at(r); +// } - /** @brief Returns the number of dimensions/modes of the subtensor */ - inline size_type rank () const { - return this->extents_.size(); - } +// /** @brief Returns the number of dimensions/modes of the subtensor */ +// inline size_type rank () const { +// return this->extents_.size(); +// } - /** @brief Returns the number of dimensions/modes of the subtensor */ - inline size_type order () const { - return this->extents_.size(); - } +// /** @brief Returns the number of dimensions/modes of the subtensor */ +// inline size_type order () const { +// return this->extents_.size(); +// } - /** @brief Returns the strides of the subtensor */ - inline auto const& strides () const { - return this->strides_; - } +// /** @brief Returns the strides of the subtensor */ +// inline auto const& strides () const { +// return this->strides_; +// } /** @brief Returns the span strides of the subtensor */ inline auto const& span_strides () const { @@ -423,21 +410,34 @@ class subtensor > } - /** @brief Returns the extents of the subtensor */ - inline auto const& extents () const { - return this->extents_; - } +// /** @brief Returns the extents of the subtensor */ +// inline auto const& extents() const { +// return this->extents_; +// } - /** @brief Returns a \c const reference to the container. 
*/ - inline const_pointer data () const { - return this->data_; - } + [[nodiscard]] inline auto empty () const noexcept { return this->size() == 0ul; } + [[nodiscard]] inline auto size () const noexcept { return product(this->extents_); } + [[nodiscard]] inline auto size (size_type r) const { return extents_.at(r); } + [[nodiscard]] inline auto rank () const { return extents_.size(); } + [[nodiscard]] inline auto order () const { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return strides_; } + [[nodiscard]] inline auto const& getExtents () const noexcept { return extents_; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return data_;} + [[nodiscard]] inline auto data () noexcept -> pointer { return data_;} +// [[nodiscard]] inline auto const& base () const noexcept { return _container; } - /** @brief Returns a \c const reference to the container. */ - inline pointer data () { - return this->data_; - } + +// /** @brief Returns a \c const reference to the container. */ +// inline const_pointer data() const { +// return this->data_; +// } + +// /** @brief Returns a \c const reference to the container. */ +// inline pointer data () { +// return this->data_; +// } @@ -660,20 +660,6 @@ class subtensor > } -#if 0 - // ------------- - // Serialization - // ------------- - - /// Serialize a tensor into and archive as defined in Boost - /// \param ar Archive object. 
Can be a flat file, an XML file or any other stream - /// \param file_version Optional file version (not yet used) - template - void serialize(Archive & ar, const unsigned int /* file_version */){ - ar & serialization::make_nvp("data",data_); - } -#endif - #endif private: @@ -686,11 +672,6 @@ class subtensor > }; -} // namespaces - - - - - +} // namespaces boost::numeric::ublas #endif diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 6c42763d0..4c38be404 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -19,9 +19,9 @@ #include #include -#include -#include -#include +#include "span.hpp" +#include "extents.hpp" +#include "tags.hpp" namespace boost::numeric::ublas::detail { @@ -36,19 +36,18 @@ namespace boost::numeric::ublas::detail { * @param[in] strides strides of the tensor, the subtensor refers to * @param[in] spans vector of spans of the subtensor */ -template -auto compute_span_strides(strides_type const& strides, spans_type const& spans) +template +auto compute_span_strides(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) throw std::runtime_error("Error in boost::numeric::ublas::subtensor::compute_span_strides(): tensor strides.size() != spans.size()"); - using base_type = typename strides_type::base_type; - auto span_strides = base_type(spans.size()); + auto span_strides = std::vector(spans.size()); std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), [](auto w, auto const& s) { return w * s.step(); } ); - return strides_type( span_strides ); + return std::vector( span_strides ); } /*! 
@brief Computes the data pointer offset for a subtensor @@ -60,16 +59,14 @@ auto compute_span_strides(strides_type const& strides, spans_type const& spans) * @param[in] strides strides of the tensor, the subtensor refers to * @param[in] spans vector of spans of the subtensor */ -template -auto compute_offset(strides_type const& strides, spans_type const& spans) +template +auto compute_offset(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); - using value_type = typename strides_type::value_type; - - return std::inner_product(spans.begin(), spans.end(), strides.begin(), value_type(0), - std::plus(), [](auto const& s, value_type w) {return s.first() * w; } ); + return std::inner_product(spans.begin(), spans.end(), strides.begin(), Size(0), + std::plus(), [](auto const& s, Size w) {return s.first() * w; } ); } @@ -82,7 +79,7 @@ auto compute_offset(strides_type const& strides, spans_type const& spans) template auto compute_extents(spans_type const& spans) { - using extents_t = basic_extents; + using extents_t = extents<>; using base_type = typename extents_t::base_type; if(spans.empty()) return extents_t{}; @@ -106,13 +103,13 @@ auto compute_extents(spans_type const& spans) * @param[in] extent extent that is maybe used for the tranformation */ template -auto transform_span(span const& s, size_type const extent) +auto transform_span(span const& s, std::size_t const extent) { using span_type = span; - size_type first = s.first(); - size_type last = s.last (); - size_type size = s.size (); + std::size_t first = s.first(); + std::size_t last = s.last (); + std::size_t size = s.size (); auto const extent0 = extent-1; @@ -121,41 +118,42 @@ auto transform_span(span const& s, size_type const extent) if constexpr ( is_sliced ){ if(size == 0) return span_type(0 , extent0); - else if(first== end) return span_type(extent0 , 
extent0); - else if(last == end) return span_type(first , extent0); + else if(first== max) return span_type(extent0 , extent0); + else if(last == max) return span_type(first , extent0); else return span_type(first , last ); } else { size_type step = s.step (); if(size == 0) return span_type(0 , size_type(1), extent0); - else if(first== end) return span_type(extent0 , step, extent0); - else if(last == end) return span_type(first , step, extent0); + else if(first== max) return span_type(extent0 , step, extent0); + else if(last == max) return span_type(first , step, extent0); else return span_type(first , step, last ); } + return span_type{}; } -template -void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... spans ); +template +void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); -template -void transform_spans_impl(basic_extents const& extents, std::array& span_array, span const& s, span_types&& ... spans) +template +void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) { std::get(span_array) = transform_span(s, extents[r]); static constexpr auto nspans = sizeof...(spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans)...); + transform_spans_impl(extents, span_array, std::forward(spans)...); } -template -void transform_spans_impl (basic_extents const& extents, std::array& span_array, std::size_t arg, span_types&& ... spans ) +template +void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... 
spans ) { - static constexpr auto nspans = sizeof...(spans); + static constexpr auto nspans = sizeof...(Spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - std::get(span_array) = transform_span(span_type(arg), extents[r]); + std::get(span_array) = transform_span(Span(arg), extents[r]); if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans) ... ); + transform_spans_impl(extents, span_array, std::forward(spans) ... ); } @@ -170,15 +168,15 @@ void transform_spans_impl (basic_extents const& extents, std::array -auto generate_span_array(basic_extents const& extents, span_types&& ... spans) +template +auto generate_span_array(extents<> const& extents, Spans&& ... spans) { - constexpr static auto n = sizeof...(spans); + constexpr static auto n = sizeof...(Spans); if(extents.size() != n) throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); std::array span_array; if constexpr (n>0) - transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); return span_array; } @@ -193,10 +191,10 @@ auto generate_span_array(basic_extents const& extents, span_types&& . * @param[in] extents of the tensor * @param[in] spans spans with which the subtensor is created */ -template -auto generate_span_vector(basic_extents const& extents, span_types&& ... spans) +template +auto generate_span_vector(extents<> const& extents, Spans&& ... 
spans) { - auto span_array = generate_span_array(extents,std::forward(spans)...); + auto span_array = generate_span_array(extents,std::forward(spans)...); return std::vector(span_array.begin(), span_array.end()); } diff --git a/include/boost/numeric/ublas/tensor/tags.hpp b/include/boost/numeric/ublas/tensor/tags.hpp index 3459b80da..711941e18 100644 --- a/include/boost/numeric/ublas/tensor/tags.hpp +++ b/include/boost/numeric/ublas/tensor/tags.hpp @@ -10,33 +10,19 @@ #ifndef BOOST_UBLAS_TENSOR_TAGS_HPP #define BOOST_UBLAS_TENSOR_TAGS_HPP -#include "../fwd.hpp" - - namespace boost::numeric::ublas{ - - struct tensor_tag{}; - - struct storage_resizable_container_tag{}; - - struct storage_static_container_tag{}; - - struct storage_seq_container_tag{}; - - struct storage_non_seq_container_tag{}; - +struct tensor_tag{}; +struct storage_resizable_container_tag{}; +struct storage_static_container_tag{}; +struct storage_seq_container_tag{}; +struct storage_non_seq_container_tag{}; } // namespace boost::numeric::ublas -namespace detail::tag { - +namespace boost::numeric::ublas::detail::tag { struct unit_access {}; struct non_unit_access{}; - -} // namespace boost::numeric::tags - - -} // namespace boost::numeric::ublas +} // namespace boost::numeric::ublas::detail::tag #endif // BOOST_UBLAS_TENSOR_TAGS_HPP diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index 57ea432f9..dd0b08607 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at @@ -12,8 +12,7 @@ #include #include -#include -#include +#include #include #include @@ -27,143 +26,145 @@ using layout_types = std::tuple>::with_t; -struct fixture { - using extents_t = boost::numeric::ublas::dynamic_extents<>; - using value_t = typename extents_t::value_type; - using multi_index_t = std::vector; - using index_t = value_t; - - fixture() - { - static_assert(shapes.size() == multi_index.size(),""); - static_assert(shapes.size() == indexf.size(),""); - static_assert(shapes.size() == indexl.size(),""); - static_assert(shapes.size() == ranks.size(),""); - - for(auto k = 0u; k < multi_index.size(); ++k){ - auto const& n = shapes[k]; - auto const r = ranks[k]; - assert( n.size() == r ); - for (auto const& i : multi_index[k]){ - assert( std::equal(i.begin(), i.end(), n.begin(), std::less<>{}) ) ; - } - } - } - +struct fixture +{ - static inline auto shapes = std::array - {{ - { }, - {1,1 }, - - {1,2 }, - {1,4 }, - {2,1 }, - {4,1 }, - {2,3 }, - - {2,3,1 }, - {1,2,3 }, - {3,1,2 }, - {3,2,4 }, - - {2,3,4,1}, - {1,2,3,4}, - {3,1,2,4}, - {3,2,4,5} - }}; - - static constexpr inline auto ranks = std::array - { 0,2,2,2,2,2,2,3,3,3,3,4,4,4,4 }; - - static inline auto multi_index = std::array,shapes.size()> - {{ - {{ { }, { }, { } }}, // 0 {} - {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} - - {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} - {{ {0,0 }, {0,2 }, {0,3 } }}, // 3 {1,4} - {{ {0,0 }, {1,0 }, {1,0 } }}, // 4 {2,1} - {{ {0,0 }, {2,0 }, {3,0 } }}, // 5 {4,1} - {{ {0,0 }, {1,1 }, {1,2 } }}, // 6 {2,3} - - {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 7 {2,3,1} - {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 8 {1,2,3} - {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 9 {3,1,2} - {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, //10 {3,2,4} - - {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, //11 {2,3,4,1} - {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //12 {1,2,3,4} - {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //13 {3,1,2,4} - {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //14 
{3,2,4,5} - }}; - - static constexpr inline auto indexf = std::array,shapes.size()> - {{ - {{0, 0, 0}}, // 0 {} - {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} - {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} - {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} - {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} - {{0, 3, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} - {{0, 3, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} - {{0, 3, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} - {{0, 4, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} - {{0,16, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} - {{0,15, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} - {{0,15, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} - {{0,16, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} - {{0,88,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} - }}; - - static constexpr inline auto indexl = std::array,shapes.size()> - {{ - {{0, 0, 0}}, // 0 {} - {{0, 0, 0}}, // 1 {1,1} - {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} - {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} - {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} - {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} - {{0, 4, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} - {{0, 4, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} - {{0, 4, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} - {{0, 3, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} - {{0,14, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} - {{0,18, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} - {{0,18, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} - {{0,14, 23}}, // 13 { 
{0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} - {{0,73,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} - }}; - - template - constexpr inline auto prodn(extents_type const& n) - { - return std::accumulate(n.begin(),n.end(),1ul, std::multiplies<>{}); + using extents_t = boost::numeric::ublas::extents<>; + using value_t = typename extents_t::value_type; + using multi_index_t = std::vector; + using index_t = value_t; + + fixture() + { + static_assert(shapes.size() == multi_index.size(),""); + static_assert(shapes.size() == indexf.size(),""); + static_assert(shapes.size() == indexl.size(),""); + static_assert(shapes.size() == ranks.size(),""); + + for(auto k = 0u; k < multi_index.size(); ++k){ + auto const& n = shapes[k]; + auto const r = ranks[k]; + assert( n.size() == r ); + for (auto const& i : multi_index[k]){ + assert( std::equal(i.begin(), i.end(), boost::numeric::ublas::begin(n), std::less<>{}) ) ; + } } - - // static constexpr inline auto const& e = shapes; - // static constexpr inline auto const& i = multi_indices; - - - // template struct x { static inline constexpr auto value = e[k][r]*x::value; }; - // template struct x { static inline constexpr auto value = 1; }; - // template struct x { static inline constexpr auto value = 1*x::value; }; - - // template struct y { static inline constexpr auto value = e[k][r ]*y::value; }; - // template struct y { static inline constexpr auto value = 1*y::value; }; - // template struct y { static inline constexpr auto value = e[k][p-1]; }; - - - // template static inline constexpr auto wf = x::value; - // template static inline constexpr auto wl = y::value; - - // template struct zf { static inline constexpr auto value = i[k][kk][r]*wf + zf::value; }; - // template struct zf<0,k,kk> { static inline constexpr auto value = i[k][kk][0]*wf; }; - - // template static inline constexpr auto c2 = zf<2,k,kk>::value; - // template static inline constexpr auto c3 = zf<3,k,kk>::value; - // 
template static inline constexpr auto c4 = zf<4,k,kk>::value; + } + + + static inline auto shapes = std::array + {{ + { }, + {1,1 }, + + {1,2 }, + {1,4 }, + {2,1 }, + {4,1 }, + {2,3 }, + + {2,3,1 }, + {1,2,3 }, + {3,1,2 }, + {3,2,4 }, + + {2,3,4,1}, + {1,2,3,4}, + {3,1,2,4}, + {3,2,4,5} + }}; + + static constexpr inline auto ranks = std::array + { 0,2,2,2,2,2,2,3,3,3,3,4,4,4,4 }; + + static inline auto multi_index = std::array,shapes.size()> + {{ + {{ { }, { }, { } }}, // 0 {} + {{ {0,0 }, {0,0 }, {0,0 } }}, // 1 {1,1} + + {{ {0,0 }, {0,1 }, {0,1 } }}, // 2 {1,2} + {{ {0,0 }, {0,2 }, {0,3 } }}, // 3 {1,4} + {{ {0,0 }, {1,0 }, {1,0 } }}, // 4 {2,1} + {{ {0,0 }, {2,0 }, {3,0 } }}, // 5 {4,1} + {{ {0,0 }, {1,1 }, {1,2 } }}, // 6 {2,3} + + {{ {0,0,0 }, {1,1,0 }, {1,2,0 } }}, // 7 {2,3,1} + {{ {0,0,0 }, {0,1,1 }, {0,1,2 } }}, // 8 {1,2,3} + {{ {0,0,0 }, {1,0,1 }, {2,0,1 } }}, // 9 {3,1,2} + {{ {0,0,0 }, {1,1,2 }, {2,1,3 } }}, //10 {3,2,4} + + {{ {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }}, //11 {2,3,4,1} + {{ {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }}, //12 {1,2,3,4} + {{ {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }}, //13 {3,1,2,4} + {{ {0,0,0,0}, {1,1,2,3}, {2,1,3,4} }} //14 {3,2,4,5} + }}; + + static constexpr inline auto indexf = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 3, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3} + {{0, 3, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1} + {{0, 3, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3} + {{0, 4, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2} + {{0,16, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4}, {1,3,6} + {{0,15, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {1,2,6,6} + {{0,15, 23}}, // 12 { {0,0,0,0}, 
{0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {1,1,2,6} + {{0,16, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, {1,3,3,6} + {{0,88,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {1,3,6,24} + }}; + + static constexpr inline auto indexl = std::array,shapes.size()> + {{ + {{0, 0, 0}}, // 0 {} + {{0, 0, 0}}, // 1 {1,1} + {{0, 1, 1}}, // 2 { {0,0 }, {0,1 }, {0,1 } }, // 2 {1,2} + {{0, 2, 3}}, // 3 { {0,0 }, {0,2 }, {0,3 } }, // 2 {1,4} + {{0, 1, 1}}, // 4 { {0,0 }, {1,0 }, {1,0 } }, // 3 {2,1} + {{0, 2, 3}}, // 5 { {0,0 }, {2,0 }, {3,0 } }, // 3 {4,1} + {{0, 4, 5}}, // 6 { {0,0 }, {1,1 }, {1,2 } }, // 4 {2,3 }, {3,1} + {{0, 4, 5}}, // 7 { {0,0,0 }, {1,1,0 }, {1,2,0 } }, // 5 {2,3,1 }, {3,1,1} + {{0, 4, 5}}, // 8 { {0,0,0 }, {0,1,1 }, {0,1,2 } }, // 6 {1,2,3 }, {6,3,1} + {{0, 3, 5}}, // 9 { {0,0,0 }, {1,0,1 }, {2,0,1 } }, // 7 {3,1,2 }, {2,2,1} + {{0,14, 23}}, // 10 { {0,0,0 }, {1,1,2 }, {2,1,3 } }, // 8 {3,2,4 }, {8,4,1} + {{0,18, 23}}, // 11 { {0,0,0,0}, {1,1,2,0}, {1,2,3,0} }, // 9 {2,3,4,1}, {12, 4,1,1} + {{0,18, 23}}, // 12 { {0,0,0,0}, {0,1,1,2}, {0,1,2,3} }, //10 {1,2,3,4}, {24,12,4,1} + {{0,14, 23}}, // 13 { {0,0,0,0}, {1,0,1,2}, {2,0,1,3} }, //11 {3,1,2,4}, { 8, 8,4,1} + {{0,73,119}}, // 14 { {0,0,0,0}, {1,1,2,3}, {2,1,3,4} } //12 {3,2,4,5}, {40,20,5,1} + }}; + + template + constexpr inline auto prodn(extents_type const& n) + { + return std::accumulate(boost::numeric::ublas::begin(n),boost::numeric::ublas::end(n),1ul, std::multiplies<>{}); + } + + // static constexpr inline auto const& e = shapes; + // static constexpr inline auto const& i = multi_indices; + + + // template struct x { static inline constexpr auto value = e[k][r]*x::value; }; + // template struct x { static inline constexpr auto value = 1; }; + // template struct x { static inline constexpr auto value = 1*x::value; }; + + // template struct y { static inline constexpr auto value = e[k][r ]*y::value; }; + // template struct y { static inline constexpr auto value = 
1*y::value; }; + // template struct y { static inline constexpr auto value = e[k][p-1]; }; + + + // template static inline constexpr auto wf = x::value; + // template static inline constexpr auto wl = y::value; + + // template struct zf { static inline constexpr auto value = i[k][kk][r]*wf + zf::value; }; + // template struct zf<0,k,kk> { static inline constexpr auto value = i[k][kk][0]*wf; }; + + // template static inline constexpr auto c2 = zf<2,k,kk>::value; + // template static inline constexpr auto c3 = zf<3,k,kk>::value; + // template static inline constexpr auto c4 = zf<4,k,kk>::value; @@ -173,70 +174,70 @@ struct fixture { BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index, layout_t, layout_types, fixture ) { - namespace ub = boost::numeric::ublas; - namespace mp = boost::mp11; - using strides_t = ub::basic_strides; - - - constexpr auto is_first_order = std::is_same_v; - constexpr auto const& index = is_first_order ? indexf : indexl; - - mp::mp_for_each>( [&]( auto I ) { - auto const& n = std::get(shapes); - auto const& i = std::get(multi_index); - auto const& jref = std::get(index); - mp::mp_for_each>( [&]( auto K ) { - auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); - BOOST_CHECK(j < prodn(n)); - BOOST_CHECK_EQUAL(j,jref[K]); - }); + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& w = ub::to_strides(n,layout_t{}); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,w.begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_t, layout_types, fixture ) { - namespace ub = boost::numeric::ublas; - namespace mp = boost::mp11; - using strides_t = ub::basic_strides; - - constexpr auto is_first_order = std::is_same_v; - constexpr auto const& index = is_first_order ? indexf : indexl; - - mp::mp_for_each>( [&]( auto I ) { - auto const& n = std::get(shapes); - auto const& i = std::get(multi_index); - auto const& jref = std::get(index); - constexpr auto r = std::get(ranks); - mp::mp_for_each>( [&]( auto K ) { - auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,strides_t(n).begin()); - BOOST_CHECK(j < prodn(n)); - BOOST_CHECK_EQUAL(j,jref[K]); - }); + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; + + constexpr auto is_first_order = std::is_same_v; + constexpr auto const& index = is_first_order ? 
indexf : indexl; + + mp::mp_for_each>( [&]( auto I ) { + auto const& n = std::get(shapes); + auto const& w = ub::to_strides(n,layout_t{}); + auto const& i = std::get(multi_index); + auto const& jref = std::get(index); + constexpr auto r = std::get(ranks); + mp::mp_for_each>( [&]( auto K ) { + auto const& ii = std::get(i); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin()); + BOOST_CHECK(j < prodn(n)); + BOOST_CHECK_EQUAL(j,jref[K]); }); + }); } BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index, layout_t, layout_types, fixture ) { - using namespace boost::numeric::ublas; - using strides_t = basic_strides; + namespace ub = boost::numeric::ublas; + namespace mp = boost::mp11; - constexpr auto is_first_order = std::is_same_v; + constexpr auto is_first_order = std::is_same_v; constexpr auto const& index = is_first_order ? indexf : indexl; for(auto k = 0u; k < index.size(); ++k){ auto const& n = shapes[k]; + auto const& w = ub::to_strides(n,layout_t{}); auto const& iref = multi_index[k]; - auto const& w = strides_t(n); auto const& jref = index[k]; for(auto kk = 0u; kk < iref.size(); ++kk){ auto const jj = jref[kk]; auto const& ii = iref[kk]; auto i = multi_index_t(w.size()); - detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); + ub::detail::compute_multi_index(jj, w.begin(), w.end(), i.begin(), layout_t{}); // if constexpr ( is_first_order ) // detail::compute_multi_index_first(jj, w.begin(), w.end(), i.begin()); // else @@ -260,7 +261,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t { namespace ub = boost::numeric::ublas; namespace mp = boost::mp11; - using strides_t = ub::basic_strides; constexpr auto is_first_order = std::is_same_v; constexpr auto const& index = is_first_order ? 
indexf : indexl; @@ -270,7 +270,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t auto const& n = std::get(shapes); auto const& iref = std::get(multi_index); auto const& jref = std::get(index); - auto const& w = strides_t(n); + auto const& w = ub::to_strides(n,layout_t{}); constexpr auto r = std::get(ranks); mp::mp_for_each>( [&]( auto K ) { auto const jj = std::get(jref); @@ -287,22 +287,21 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_multi_index_static_rank, layout_t BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_subtensor, layout_t, layout_types, fixture ) { - using namespace boost::numeric::ublas; - using strides_t = basic_strides; + namespace ub = boost::numeric::ublas; // subtensor the whole index-domain of a tensor - constexpr auto is_first_order = std::is_same_v; + constexpr auto is_first_order = std::is_same_v; constexpr auto const& index = is_first_order ? indexf : indexl; // subtensor the whole index-domain of a tensor for(auto k = 0u; k < index.size(); ++k){ auto const& n = shapes[k]; - auto const& w = strides_t(n); + auto const& w = ub::to_strides(n,layout_t{}); auto const& jref = index[k]; for(auto kk = 0u; kk < jref.size(); ++kk){ auto const jj = jref[kk]; - auto const j = detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); + auto const j = ub::detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); BOOST_CHECK_EQUAL ( j, jj ) ; } } diff --git a/test/tensor/test_algorithms.cpp b/test/tensor/test_algorithms.cpp index 8f1a3b225..a03b4fbca 100644 --- a/test/tensor/test_algorithms.cpp +++ b/test/tensor/test_algorithms.cpp @@ -21,10 +21,6 @@ #include -BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms/*, - * boost::unit_test::depends_on("test_shape_dynamic") * boost::unit_test::depends_on("test_strides")*/ - ) - BOOST_AUTO_TEST_SUITE ( test_tensor_algorithms) diff --git a/test/tensor/test_functions.cpp b/test/tensor/test_functions.cpp index c5a55048c..920fdac9a 100644 --- 
a/test/tensor/test_functions.cpp +++ b/test/tensor/test_functions.cpp @@ -62,12 +62,20 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, f auto a = tensor_type(n, value_type{2}); + std::cout << "a=" << a << std::endl; + for(auto m = 0u; m < ublas::size(n); ++m){ auto b = vector_type (n[m], value_type{1} ); + std::cout << "b=" << tensor_type(b) << std::endl; + + std::cout << "m=" << m << std::endl; + auto c = ublas::prod(a, b, m+1); + std::cout << "c=" << tensor_type(c) << std::endl; + for(auto i = 0u; i < c.size(); ++i) BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(n[m]) ) * a[i] ); diff --git a/test/tensor/test_strides.cpp b/test/tensor/test_strides.cpp index 71ef94256..94069a2a9 100644 --- a/test/tensor/test_strides.cpp +++ b/test/tensor/test_strides.cpp @@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE( test_strides_ctor_access_first_order) BOOST_CHECK_EQUAL ( s12[1], 1); BOOST_CHECK_EQUAL ( s21[0], 1); - BOOST_CHECK_EQUAL ( s21[1], 1); + BOOST_CHECK_EQUAL ( s21[1], 2); BOOST_CHECK_EQUAL ( s23[0], 1); @@ -135,7 +135,7 @@ BOOST_AUTO_TEST_CASE( test_strides_ctor_access_last_order) BOOST_CHECK_EQUAL ( s11[0], 1); BOOST_CHECK_EQUAL ( s11[1], 1); - BOOST_CHECK_EQUAL ( s12[0], 1); + BOOST_CHECK_EQUAL ( s12[0], 2); BOOST_CHECK_EQUAL ( s12[1], 1); BOOST_CHECK_EQUAL ( s21[0], 1); diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index 24788ce6b..f3db7334d 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -14,60 +14,57 @@ #include #include "utility.hpp" -#include + +#include #include #include #include -BOOST_AUTO_TEST_SUITE ( subtensor_testsuite ) ; - -// double,std::complex - - +BOOST_AUTO_TEST_SUITE ( subtensor_testsuite ) -using test_types = zip>::with_t; +using test_types = zip>::with_t; struct fixture_shape { - using shape = boost::numeric::ublas::basic_extents; - - fixture_shape() : extents{ - shape{}, // 0 - shape{1,1}, // 1 - shape{1,2}, // 2 - shape{2,1}, // 3 - 
shape{2,3}, // 4 - shape{2,3,1}, // 5 - shape{4,1,3}, // 6 - shape{1,2,3}, // 7 - shape{4,2,3}, // 8 - shape{4,2,3,5} // 9 - } - {} - std::vector extents; + using shape = boost::numeric::ublas::extents<>; + + fixture_shape() : extents{ + shape{}, // 0 + shape{1,1}, // 1 + shape{1,2}, // 2 + shape{2,1}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{4,1,3}, // 6 + shape{1,2,3}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,5} // 9 + } + {} + std::vector extents; }; BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixture_shape ) { - namespace ub = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ub::dynamic_tensor; - using subtensor_type = ub::subtensor; + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = ublas::subtensor; auto check = [](auto const& e) { - auto t = tensor_type{e}; + auto t = tensor_type(e); auto s = subtensor_type(t); BOOST_CHECK_EQUAL ( s.size() , t.size() ); BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK_EQUAL ( s.empty(), t.empty() ); BOOST_CHECK_EQUAL ( s. data(), t. 
data() ); } @@ -88,125 +85,125 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { namespace ub = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ub::dynamic_tensor; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ub::tensor_dynamic; using subtensor_type = ub::subtensor; using span = ub::sliced_span; - { - auto A = tensor_type{}; - auto Asub = subtensor_type( A ); + { + auto A = tensor_type{}; + auto Asub = subtensor_type( A ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); - BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); - BOOST_CHECK( Asub.data() == A.data() ); - } + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } - { - auto A = tensor_type{1,1}; - auto Asub = subtensor_type( A, 0, 0 ); + { + auto A = tensor_type{1,1}; + auto Asub = subtensor_type( A, 0, 0 ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); - BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); - BOOST_CHECK( Asub.data() == A.data() ); - } + BOOST_CHECK( Asub.span_strides() == A.strides() ); + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } - { - auto A = tensor_type{1,2}; + { + auto A = tensor_type{1,2}; auto Asub = subtensor_type( A, 0, span{} ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); - BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); - BOOST_CHECK( Asub.data() == A.data() ); - } - { - auto A = tensor_type{1,2}; - auto Asub = subtensor_type( A, 0, 1 ); + BOOST_CHECK( Asub.span_strides() == A.strides() ); 
+ BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + } + { + auto A = tensor_type{1,2}; + auto Asub = subtensor_type( A, 0, 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 1 ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1) ); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } - { - auto A = tensor_type{2,3}; - auto Asub = subtensor_type( A, 0, 1 ); - auto B = tensor_type(Asub.extents()); + { + auto A = tensor_type{2,3}; + auto Asub = subtensor_type( A, 0, 1 ); + auto B = tensor_type(Asub.getExtents()()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( 
Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1) ); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } - { - auto A = tensor_type{4,3}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::end) ); - auto B = tensor_type(Asub.extents()); + { + auto A = tensor_type{4,3}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::max) ); + auto B = tensor_type(Asub.getExtents()()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1) ); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + } - { - auto A = tensor_type{4,3,5}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::end), span(2,4) ); + { + auto A = tensor_type{4,3,5}; + auto Asub = subtensor_type( A, span(1,2), span(1,ub::max), span(2,4) ); - auto B = tensor_type(Asub.extents()); + auto B = 
tensor_type(Asub.getExtents()()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(2) , 3 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.getExtents()().at(2) , 3 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.strides().at(2), B.strides().at(2) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(2), B.strides().at(2) ); - BOOST_CHECK_EQUAL( Asub.data() , A.data()+ - Asub.spans().at(0).first()*A.strides().at(0) + - Asub.spans().at(1).first()*A.strides().at(1)+ - Asub.spans().at(2).first()*A.strides().at(2)); - } + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1)+ + Asub.spans().at(2).first()*A.strides().at(2)); + } } @@ -214,17 +211,17 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, fixture_shape ) { - namespace ub = boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ub::dynamic_tensor; - using subtensor_type = ub::subtensor; - using span = 
ub::sliced_span; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = ublas::subtensor; + // using span = ub::sliced_span; - auto check = [](auto const& e) - { + auto check = [](auto const& e) + { auto A = tensor_type{e}; value_type i{}; @@ -237,12 +234,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.getExtents()() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); BOOST_CHECK( Bsub.span_strides() == A.strides() ); BOOST_CHECK( Bsub.strides() == A.strides() ); - BOOST_CHECK( Bsub.extents() == A.extents() ); + BOOST_CHECK( Bsub.getExtents() == A.extents() ); BOOST_CHECK( Bsub.data() == A.data() ); BOOST_CHECK_EQUAL ( Bsub.size() , A.size() ); @@ -250,22 +247,22 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f - if(e.empty()) { + if(ublas::empty(e)) { BOOST_CHECK ( Bsub.empty() ); BOOST_CHECK_EQUAL ( Bsub.data() , nullptr); - } - else{ + } + else{ BOOST_CHECK ( !Bsub.empty() ); BOOST_CHECK_NE ( Bsub.data() , nullptr); - } + } for(auto i = 0ul; i < Asub.size(); ++i) BOOST_CHECK_EQUAL( Asub[i], Bsub[i] ); - }; + }; - for(auto const& e : extents) - check(e); + for(auto const& e : extents) + check(e); } @@ -592,4 +589,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_ty #endif -BOOST_AUTO_TEST_SUITE_END(); +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index 2c4cace00..b96fb5b57 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -15,14 +15,13 @@ #include "utility.hpp" #include -#include -#include +#include #include #include -BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) ; +BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) @@ -36,8 +35,8 @@ struct 
fixture_sliced_span { span_type(0,2), // 2, a(0:2) span_type(1,1), // 3, a(1:1) span_type(1,3), // 4, a(1:3) - span_type(1,boost::numeric::ublas::end), // 5, a(1:end) - span_type(boost::numeric::ublas::end) // 6, a(end) + span_type(1,boost::numeric::ublas::max), // 5, a(1:end) + span_type(boost::numeric::ublas::max) // 6, a(end) } {} std::vector spans; @@ -47,7 +46,7 @@ struct fixture_sliced_span { BOOST_FIXTURE_TEST_CASE( transform_sliced_span_test, fixture_sliced_span ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; // template BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::sliced_span(0,1) ); @@ -91,8 +90,8 @@ struct fixture_strided_span { span_type(0,2,2), // 2, a(0:2:2) span_type(1,1,1), // 3, a(1:1:1) span_type(1,1,3), // 4, a(1:1:3) - span_type(1,2,boost::numeric::ublas::end), // 5, a(1:2:end) - span_type(boost::numeric::ublas::end) // 6, a(end) + span_type(1,2,boost::numeric::ublas::max), // 5, a(1:2:end) + span_type(boost::numeric::ublas::max) // 6, a(end) } {} std::vector spans; @@ -140,7 +139,7 @@ BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) struct fixture_shape { - using shape = boost::numeric::ublas::basic_extents; + using shape = boost::numeric::ublas::extents<>; fixture_shape() : extents{ shape{}, // 0 @@ -160,12 +159,12 @@ struct fixture_shape { BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) { - using namespace boost::numeric::ublas; - using span = sliced_span; + namespace ublas = boost::numeric::ublas; + using span = ublas::sliced_span; // shape{} { - auto v = detail::generate_span_array(extents[0]); + auto v = ublas::detail::generate_span_array(extents[0]); auto r = std::vector{}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } @@ -173,67 +172,67 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) // shape{1,1} { - auto v = 
detail::generate_span_array(extents[1],span(),span()); + auto v = ublas::detail::generate_span_array(extents[1],span(),span()); auto r = std::vector{span(0,0),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,1} - { - auto v = detail::generate_span_array(extents[1],end,span(end)); + { + auto v = ublas::detail::generate_span_array(extents[1],ublas::max,span(ublas::max)); auto r = std::vector{span(0,0),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,1} - { - auto v = detail::generate_span_array(extents[1],0,end); + { + auto v = ublas::detail::generate_span_array(extents[1],0,ublas::max); auto r = std::vector{span(0,0),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,2} - { - auto v = detail::generate_span_array(extents[2],0,end); + { + auto v = ublas::detail::generate_span_array(extents[2],0,ublas::max); auto r = std::vector{span(0,0),span(1,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{1,2} { - auto v = detail::generate_span_array(extents[2],0,1); + auto v = ublas::detail::generate_span_array(extents[2],0,1); auto r = std::vector{span(0,0),span(1,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { - auto v = detail::generate_span_array(extents[2],span(),span()); + auto v = ublas::detail::generate_span_array(extents[2],span(),span()); auto r = std::vector{span(0,0),span(0,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{2,3} { - auto v = detail::generate_span_array(extents[4],span(),span()); + auto v = ublas::detail::generate_span_array(extents[4],span(),span()); auto r = 
std::vector{span(0,1),span(0,2)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { - auto v = detail::generate_span_array(extents[4],1,span(1,end)); + auto v = ublas::detail::generate_span_array(extents[4],1,span(1,ublas::max)); auto r = std::vector{span(1,1),span(1,2)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } // shape{2,3,1} { - auto v = detail::generate_span_array(extents[5],span(),span(),0); + auto v = ublas::detail::generate_span_array(extents[5],span(),span(),0); auto r = std::vector{span(0,1),span(0,2),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { - auto v = detail::generate_span_array(extents[5],1,span(),end); + auto v = ublas::detail::generate_span_array(extents[5],1,span(),ublas::max); auto r = std::vector{span(1,1),span(0,2),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } @@ -242,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) struct fixture_span_vector_shape { - using shape = boost::numeric::ublas::basic_extents; + using shape = boost::numeric::ublas::extents<>; using span = boost::numeric::ublas::sliced_span; @@ -292,57 +291,57 @@ struct fixture_span_vector_shape { BOOST_FIXTURE_TEST_CASE( extents_test, fixture_span_vector_shape ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; - BOOST_CHECK ( std::equal( std::get<0>(reference_).begin(), std::get<0>(reference_).end(), ublas::detail::compute_extents( std::get<0>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<1>(reference_).begin(), std::get<1>(reference_).end(), ublas::detail::compute_extents( std::get<1>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<2>(reference_).begin(), std::get<2>(reference_).end(), 
ublas::detail::compute_extents( std::get<2>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<3>(reference_).begin(), std::get<3>(reference_).end(), ublas::detail::compute_extents( std::get<3>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<4>(reference_).begin(), std::get<4>(reference_).end(), ublas::detail::compute_extents( std::get<4>(span_vectors_) ).begin() ) ); - BOOST_CHECK ( std::equal( std::get<5>(reference_).begin(), std::get<5>(reference_).end(), ublas::detail::compute_extents( std::get<5>(span_vectors_) ).begin() ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<0>(reference_)), ublas::end(std::get<0>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<0>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<1>(reference_)), ublas::end(std::get<1>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<1>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<2>(reference_)), ublas::end(std::get<2>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<2>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<3>(reference_)), ublas::end(std::get<3>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<3>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<4>(reference_)), ublas::end(std::get<4>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<4>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<5>(reference_)), ublas::end(std::get<5>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<5>(span_vectors_) ) ) ) ); } -using test_types = std::tuple; +using test_types = std::tuple; BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_vector_shape ) { - using namespace boost::numeric; - using strides = ublas::basic_strides; + namespace ublas = boost::numeric::ublas; + { auto s = 
std::get<0>(span_vectors_); - auto w = strides( std::get<0>(extents_) ); + auto w = ublas::to_strides( std::get<0>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<1>(span_vectors_); - auto w = strides( std::get<1>(extents_) ); + auto w = ublas::to_strides( std::get<1>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<2>(span_vectors_); - auto w = strides( std::get<2>(extents_) ); + auto w = ublas::to_strides( std::get<2>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<3>(span_vectors_); - auto w = strides( std::get<3>(extents_) ); + auto w = ublas::to_strides( std::get<3>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] ); } { auto s = std::get<4>(span_vectors_); - auto w = strides( std::get<4>(extents_) ); + auto w = ublas::to_strides( std::get<4>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] ); } @@ -350,7 +349,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ { auto s = std::get<5>(span_vectors_); - auto w = ( std::get<5>(extents_) ); + auto w = ublas::to_strides( std::get<5>(extents_), layout{} ); auto o = ublas::detail::compute_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] + s[3].first()*w[3] ); } @@ -391,4 +390,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( span_strides_test, layout, test_types, fixture #endif -BOOST_AUTO_TEST_SUITE_END(); +BOOST_AUTO_TEST_SUITE_END() From 07a9cf5fabf64287a551e00b8c6f582f46bcdaf5 Mon Sep 17 00:00:00 2001 From: Cem Bassoy Date: Fri, 4 Jun 2021 21:23:03 +0200 Subject: [PATCH 08/40] product function for tensor-times-vector improved. 
--- .github/workflows/windows.yml | 2 +- .../tensor/extents/extents_functions.hpp | 31 +++ .../tensor/function/tensor_times_vector.hpp | 258 +++++++++++++++--- .../numeric/ublas/tensor/multiplication.hpp | 102 +++---- .../boost/numeric/ublas/tensor/subtensor.hpp | 6 +- .../ublas/tensor/subtensor_utility.hpp | 8 +- .../tensor/tensor/tensor_static_rank.hpp | 15 + test/tensor/Jamfile | 5 +- test/tensor/test_functions.cpp | 156 +++++------ test/tensor/test_multiplication.cpp | 38 ++- test/tensor/test_subtensor.cpp | 4 +- test/tensor/test_subtensor_utility.cpp | 24 +- 12 files changed, 437 insertions(+), 212 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7bdfc69be..b1a4e778a 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -24,7 +24,7 @@ jobs: # - {os: windows-2016, toolset: msvc, version: 14.16, cxxstd: 11} # - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 11} # - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: 17} - - {os: windows-2019, toolset: msvc, version: 14.28, cxxstd: latest} + - {os: windows-2019, toolset: msvc, version: 14.29, cxxstd: latest} steps: - uses: actions/checkout@v2 diff --git a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp index 149e12bde..cfa247b4d 100644 --- a/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/extents/extents_functions.hpp @@ -77,6 +77,37 @@ template // std::all_of(cbegin(e)+2,cend(e) , [](auto a){return a==1UL;}); } +/** @brief Returns true if extents equals (1,[n,1,...,1]) with n>1 */ +template +[[nodiscard]] inline constexpr bool is_row_vector(extents_base const& e) +{ + if (empty(e) || size(e) == 1 ) {return false;} + + if(cbegin(e)[0] == 1ul && + cbegin(e)[1] > 1ul && + std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;})){ + return true; + } + + return false; +} + + +/** 
@brief Returns true if extents equals (m,[1,1,...,1]) with m>=1 */ +template +[[nodiscard]] inline constexpr bool is_col_vector(extents_base const& e) +{ + if (empty(e) || size(e) == 1 ) {return false;} + + if(cbegin(e)[0] > 1ul && + cbegin(e)[1] == 1ul && + std::all_of(cbegin(e)+2ul,cend (e) , [](auto a){return a==1ul;})){ + return true; + } + + return false; +} + /** @brief Returns true if (m,[n,1,...,1]) with m>=1 or n>=1 */ template [[nodiscard]] inline constexpr bool is_matrix(extents_base const& e) diff --git a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp index 5702f97fb..611c5a82e 100644 --- a/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp +++ b/include/boost/numeric/ublas/tensor/function/tensor_times_vector.hpp @@ -15,6 +15,7 @@ #include #include +#include "../multiplication.hpp" #include "../extents.hpp" #include "../type_traits.hpp" #include "../tags.hpp" @@ -49,6 +50,190 @@ using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t +inline auto scalar_scalar_prod(TA const &a, V const &b, EC const& nc_base) +{ + assert(ublas::is_scalar(a.extents())); + using tensor = TC; + using value = typename tensor::value_type; + using shape = typename tensor::extents_type; + return tensor(shape(nc_base),value(a[0]*b(0))); +} + +template +inline auto vector_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m) +{ + auto const& na = a.extents(); + + assert( ublas::is_vector(na)); + assert(!ublas::is_scalar(na)); + assert( ublas::size(na) > 1u); + assert(m > 0); + + using tensor = TC; + using value = typename tensor::value_type; + using shape = typename tensor::extents_type; + + auto const n1 = na[0]; + auto const n2 = na[1]; + auto const s = b.size(); + + // general + // [n1 n2 1 ... 1] xj [s 1] for any 1 <= j <= p with n1==1 or n2==1 + + + // [n1 1 1 ... 1] x1 [n1 1] -> [1 1 1 ... 1] + // [1 n2 1 ... 1] x2 [n2 1] -> [1 1 1 ... 
1] + + + assert(n1>1 || n2>1); + + if( (n1>1u && m==1u) || (n2>1u && m==2u) ){ + if(m==1u) assert(n2==1u && n1==s); + if(m==2u) assert(n1==1u && n2==s); + auto cc = std::inner_product( a.begin(), a.end(), b.begin(), value(0) ); + return tensor(shape(nc_base),value(cc)); + } + + // [n1 1 1 ... 1] xj [1 1] -> [n1 1 1 ... 1] with j != 1 + // [1 n2 1 ... 1] xj [1 1] -> [1 n2 1 ... 1] with j != 2 + +//if( (n1>1u && m!=1u) && (n2>0u && m!=2u) ){ + + if(n1>1u) assert(m!=1u); + if(n2>1u) assert(m!=2u); + assert(s==1u); + + if(n1>1u) assert(n2==1u); + if(n2>1u) assert(n1==1u); + + if(n1>1u) nc_base[0] = n1; + if(n2>1u) nc_base[1] = n2; + + auto bb = b(0); + auto c = tensor(shape(nc_base)); + std::transform(a.begin(),a.end(),c.begin(),[bb](auto aa){ return aa*bb; }); + return c; +//} + + +} + + +/** Computes a matrix-vector product. + * + * + * @note assume stride 1 for specific dimensions and therefore requires refactoring for subtensor + * +*/ +template +inline auto matrix_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m) +{ + auto const& na = a.extents(); + + assert( ublas::is_matrix(na)); + assert(!ublas::is_vector(na)); + assert(!ublas::is_scalar(na)); + assert( ublas::size(na) > 1u); + assert(m > 0); + + using tensor = TC; + using shape = typename tensor::extents_type; + using size_t = typename shape::value_type; + + auto const n1 = na[0]; + auto const n2 = na[1]; + auto const s = b.size(); + + // general + // [n1 n2 1 ... 1] xj [s 1] for any 1 <= j <= p with either n1>1 and n2>1 + + + // if [n1 n2 1 ... 1] xj [1 1] -> [n1 n2 1 ... 1] for j > 2 + if(m > 2){ + nc_base[0] = n1; + nc_base[1] = n2; + assert(s == 1); + auto c = tensor(shape(nc_base)); + auto const bb = b(0); + std::transform(a.begin(),a.end(), c.begin(), [bb](auto aa){return aa*bb;}); + return c; + } + + + // [n1 n2 1 ... 1] x1 [n1 1] -> [n2 1 ... 1] -> vector-times-matrix + // [n1 n2 1 ... 1] x2 [n2 1] -> [n1 1 ... 1] -> matrix-times-vector + + nc_base[0] = m==1 ? 
n2 : n1; + + auto c = tensor(shape(nc_base)); + auto const& wa = a.strides(); + auto const* bdata = &(b(0)); + + detail::recursive::mtv(m-1,n1,n2, c.data(), size_t(1), a.data(), wa[0], wa[1], bdata, size_t(1)); + + return c; +} + + + +template +inline auto tensor_vector_prod(TA const &a, V const &b, EC& nc_base, std::size_t m) +{ + auto const& na = a.extents(); + + assert( ublas::is_tensor(na)); + assert( ublas::size(na) > 1u); + assert(m > 0); + + using tensor = TC; + using shape = typename tensor::extents_type; + using layout = typename tensor::layout_type; + + auto const pa = a.rank(); + auto const nm = na[m-1]; + auto const s = b.size(); + + auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)}; + auto wb = ublas::to_strides(nb,layout{} ); + + //TODO: Include an outer product when legacy vector becomes a new vector. + + for (auto i = 0ul, j = 0ul; i < pa; ++i) + if (i != m - 1) + nc_base[j++] = na.at(i); + + auto c = tensor(shape(nc_base)); + + // [n1 n2 ... nm ... np] xm [1 1] -> [n1 n2 ... nm-1 nm+1 ... np] + + if(s == 0){ + assert(nm == 1); + auto const bb = b(0); + std::transform(a.begin(),a.end(), c.begin(), [bb](auto aa){return aa*bb;}); + return c; + } + + + // if [n1 n2 n3 ... np] xm [nm 1] -> [n1 n2 ... nm-1 nm+1 ... 
np] + + auto const& nc = c.extents(); + auto const& wc = c.strides(); + auto const& wa = a.strides(); + auto const* bp = &(b(0)); + + ttv(m, pa, + c.data(), nc.data(), wc.data(), + a.data(), na.data(), wa.data(), + bp, nb.data(), wb.data()); + + return c; +} + +}//namespace detail + + /** @brief Computes the m-mode tensor-times-vector product * * Implements C[i1,...,im-1,im+1,...,ip] = A[i1,i2,...,ip] * b[im] @@ -63,45 +248,49 @@ using enable_ttv_if_extent_has_dynamic_rank = std::enable_if_t::value, detail::enable_ttv_if_extent_has_dynamic_rank = true > -inline decltype(auto) prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) +inline auto prod( tensor_core< TE > const &a, vector const &b, const std::size_t m) { using tensor = tensor_core< TE >; using shape = typename tensor::extents_type; - using value = typename tensor::value_type; - using layout = typename tensor::layout_type; using resize_tag = typename tensor::resizable_tag; - auto const p = a.rank(); + auto const pa = a.rank(); static_assert(std::is_same_v); static_assert(is_dynamic_v); if (m == 0ul) throw std::length_error("error in boost::numeric::ublas::prod(ttv): contraction mode must be greater than zero."); - if (p < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode."); + if (pa < m) throw std::length_error("error in boost::numeric::ublas::prod(ttv): rank of tensor must be greater than or equal to the contraction mode."); if (a.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): first argument tensor should not be empty."); if (b.empty()) throw std::length_error("error in boost::numeric::ublas::prod(ttv): second argument vector should not be empty."); auto const& na = a.extents(); - auto nb = extents<2>{std::size_t(b.size()),std::size_t(1ul)}; - auto wb = ublas::to_strides(nb,layout{} ); + + if(b.size() != na[m-1]) throw std::length_error("error in 
boost::numeric::ublas::prod(ttv): dimension mismatch of tensor and vector."); auto const sz = std::max( std::size_t(ublas::size(na)-1u), std::size_t(2) ); auto nc_base = typename shape::base_type(sz,1); - for (auto i = 0ul, j = 0ul; i < p; ++i) - if (i != m - 1) - nc_base[j++] = na.at(i); + // output scalar tensor + if(ublas::is_scalar(na)){ + return detail::scalar_scalar_prod(a,b,nc_base); + } + + // output scalar tensor or vector tensor + if (ublas::is_vector(na)){ + return detail::vector_vector_prod(a,b,nc_base,m); + } + + // output scalar tensor or vector tensor + if (ublas::is_matrix(na)){ + return detail::matrix_vector_prod(a,b,nc_base,m); + } + + assert(ublas::is_tensor(na)); + return detail::tensor_vector_prod(a,b,nc_base,m); - auto nc = shape(nc_base); - auto c = tensor( nc, value{} ); - auto const* bb = &(b(0)); - ttv(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), wb.data()); - return c; } @@ -143,7 +332,6 @@ inline auto prod( tensor_core< TE > const &a, vector const &b, const std:: constexpr auto p = std::tuple_size_v; constexpr auto sz = std::max(std::size_t(std::tuple_size_v-1U),std::size_t(2)); - using shape_b = ublas::extents<2>; using shape_c = ublas::extents; using tensor_c = tensor_core>; @@ -158,21 +346,25 @@ inline auto prod( tensor_core< TE > const &a, vector const &b, const std:: auto nc_base = typename shape_c::base_type{}; std::fill(nc_base.begin(), nc_base.end(),std::size_t(1)); - for (auto i = 0ul, j = 0ul; i < p; ++i) - if (i != m - 1) - nc_base[j++] = na.at(i); - auto nc = shape_c(std::move(nc_base)); - auto nb = shape_b{b.size(),1UL}; - auto wb = ublas::to_strides(nb,layout{}); - auto c = tensor_c( std::move(nc) ); - auto const* bb = &(b(0)); - ttv(m, p, - c.data(), c.extents().data(), c.strides().data(), - a.data(), a.extents().data(), a.strides().data(), - bb, nb.data(), wb.data() ); - return c; + // output scalar tensor + if(ublas::is_scalar(na)){ + 
return detail::scalar_scalar_prod(a,b,nc_base); + } + + // output scalar tensor or vector tensor + if (ublas::is_vector(na)){ + return detail::vector_vector_prod(a,b,nc_base,m); + } + + // output scalar tensor or vector tensor + if (ublas::is_matrix(na)){ + return detail::matrix_vector_prod(a,b,nc_base,m); + } + + assert(ublas::is_tensor(na)); + return detail::tensor_vector_prod(a,b,nc_base,m); } @@ -201,7 +393,7 @@ inline auto prod( tensor_core< TE > const &a, vector const &b) using shape = typename tensor::extents; using layout = typename tensor::layout; using shape_b = extents<2>; - using shape_c = remove_element_t; + using shape_c = remove_element_t; // this is wrong using container_c = rebind_storage_size_t; using tensor_c = tensor_core>; diff --git a/include/boost/numeric/ublas/tensor/multiplication.hpp b/include/boost/numeric/ublas/tensor/multiplication.hpp index e2d94f7be..ea7901814 100644 --- a/include/boost/numeric/ublas/tensor/multiplication.hpp +++ b/include/boost/numeric/ublas/tensor/multiplication.hpp @@ -337,33 +337,44 @@ void ttv0(SizeType const r, /** @brief Computes the matrix-times-vector product * - * Implements C[i1] = sum(A[i1,i2] * b[i2]) or C[i2] = sum(A[i1,i2] * b[i1]) - * - * @note is used in function ttv - * - * @param[in] m zero-based contraction mode with m=0 or m=1 - * @param[out] c pointer to the output tensor C - * @param[in] nc pointer to the extents of tensor C - * @param[in] wc pointer to the strides of tensor C - * @param[in] a pointer to the first input tensor A - * @param[in] na pointer to the extents of input tensor A - * @param[in] wa pointer to the strides of input tensor A - * @param[in] b pointer to the second input tensor B + * Implements C[i1] = sum(A[i1,i2] * B[i2]) if k = 1 or C[i2] = sum(A[i1,i2] * B[i1]) if k = 0 + * + * [m,n] = size(A(..,:,..,:,..)) + * [m] = size(C(..,:,..)) + * [n] = size(B(..,:,..)) + * + * + * @param[in] k if k = 0 + * @param[in] m number of rows of A + * @param[in] n number of columns of A + * 
@param[out] c pointer to C + * @param[in] wc m-th (k=1) or n-th (k=0) stride for C + * @param[in] a pointer to A + * @param[in] wa_m m-th stride for A + * @param[in] wa_n n-th stride for A + * @param[in] b pointer to B + * @param[in] wb n-th (k=1) or m-th (k=0) stride for B */ template -void mtv(SizeType const m, - PointerOut c, SizeType const*const /*unsed*/, SizeType const*const wc, - PointerIn1 a, SizeType const*const na , SizeType const*const wa, - PointerIn2 b) +void mtv(SizeType const k, + SizeType const m, + SizeType const n, + PointerOut c, SizeType const wc, + PointerIn1 a, SizeType const wa_m, SizeType const wa_n, + PointerIn2 b, SizeType const wb) { - // decides whether matrix multiplied with vector or vector multiplied with matrix - const auto o = (m == 0) ? 1 : 0; - for(auto io = 0u; io < na[o]; c += wc[o], a += wa[o], ++io) { + auto const wa_x = k==0 ? wa_n : wa_m; + auto const wa_y = k==0 ? wa_m : wa_n; + + auto const x = k==0 ? n : m; + auto const y = k==0 ? m : n; + + for(auto ix = 0u; ix < x; c += wc, a += wa_x, ++ix) { auto c1 = c; auto a1 = a; auto b1 = b; - for(auto im = 0u; im < na[m]; a1 += wa[m], ++b1, ++im) { + for(auto iy = 0u; iy < y; a1 += wa_y, b1 += wb, ++iy) { *c1 += *a1 * *b1; } } @@ -603,10 +614,12 @@ namespace boost::numeric::ublas { * C[i1,i2,...,im-1,im+1,...,ip] = sum(A[i1,i2,...,im,...,ip] * b[im]) for m>1 and * C[i2,...,ip] = sum(A[i1,...,ip] * b[i1]) for m=1 * - * @note calls detail::ttv, detail::ttv0 or detail::mtv + * @note calls detail::ttv, detail::ttv0 for p == 1 or p == 2 use ublas::inner or ublas::mtv or ublas::vtm + * + * * * @param[in] m contraction mode with 0 < m <= p - * @param[in] p number of dimensions (rank) of the first input tensor with p > 0 + * @param[in] p number of dimensions (rank) of the first input tensor with p > 2 * @param[out] c pointer to the output tensor with rank p-1 * @param[in] nc pointer to the extents of tensor c * @param[in] wc pointer to the strides of tensor c @@ -621,50 +634,25 @@ 
template void ttv(SizeType const m, SizeType const p, PointerOut c, SizeType const*const nc, SizeType const*const wc, const PointerIn1 a, SizeType const*const na, SizeType const*const wa, - const PointerIn2 b, SizeType const*const nb, SizeType const*const wb) + const PointerIn2 b, SizeType const*const nb, SizeType const*const /*unused*/) { - static_assert( std::is_pointer::value && std::is_pointer::value & std::is_pointer::value, + static_assert( std::is_pointer::value && std::is_pointer::value && std::is_pointer::value, "Static error in boost::numeric::ublas::ttv: Argument types for pointers are not pointer types."); - if( m == 0){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Contraction mode must be greater than zero."); - } + assert(m != 0); + assert(p >= m); + assert(p >= 2); - if( p < m ){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater equal the modus."); - } - if( p == 0){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Rank must be greater than zero."); - } - if(c == nullptr || a == nullptr || b == nullptr){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Pointers shall not be null pointers."); - } for(auto i = 0u; i < m-1; ++i){ - if(na[i] != nc[i]){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Extents (except of dimension mode) of A and C must be equal."); - } - } - - const auto max = std::max(nb[0], nb[1]); - if( na[m-1] != max){ - throw std::length_error("Error in boost::numeric::ublas::ttv: Extent of dimension mode of A and b must be equal."); + assert(na[i] == nc[i]); } + assert(na[m-1] == std::max(nb[0], nb[1])); - if((m != 1) && (p > 2)){ - detail::recursive::ttv(m-1, p-1, p-2, c, nc, wc, a, na, wa, b); - } - else if ((m == 1) && (p > 2)){ + if(m == 1) detail::recursive::ttv0(p-1, c, nc, wc, a, na, wa, b); - } - else if( p == 2 ){ - detail::recursive::mtv(m-1, c, nc, wc, a, na, wa, b); - } - else /*if( p == 1 )*/{ - auto v = 
std::remove_pointer_t>{}; - *c = detail::recursive::inner(SizeType(0), na, a, wa, b, wb, v); - } - + else + detail::recursive::ttv (m-1, p-1, p-2, c, nc, wc, a, na, wa, b); } diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp index e8a150d16..a119300b7 100644 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor.hpp @@ -129,10 +129,10 @@ class subtensor > subtensor(tensor_type& t, span_types&& ... spans) : super_type () , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) - , extents_ (detail::compute_extents(spans_)) + , extents_ (detail::to_extents(spans_)) , strides_ (ublas::to_strides(extents_,layout_type{})) - , span_strides_ (detail::compute_span_strides(t.strides(),spans_)) - , data_ {t.data() + detail::compute_offset(t.strides(), spans_)} + , span_strides_ (detail::to_span_strides(t.strides(),spans_)) + , data_ {t.data() + detail::to_offset(t.strides(), spans_)} { // if( m == nullptr) // throw std::length_error("Error in tensor_view::tensor_view : multi_array_type is nullptr."); diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 4c38be404..4b203c138 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -37,10 +37,10 @@ namespace boost::numeric::ublas::detail { * @param[in] spans vector of spans of the subtensor */ template -auto compute_span_strides(std::vector const& strides, Spans const& spans) +auto to_span_strides(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) - throw std::runtime_error("Error in boost::numeric::ublas::subtensor::compute_span_strides(): tensor strides.size() != spans.size()"); + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::to_span_strides(): tensor strides.size() != 
spans.size()"); auto span_strides = std::vector(spans.size()); @@ -60,7 +60,7 @@ auto compute_span_strides(std::vector const& strides, Spans const& sp * @param[in] spans vector of spans of the subtensor */ template -auto compute_offset(std::vector const& strides, Spans const& spans) +auto to_offset(std::vector const& strides, Spans const& spans) { if(strides.size() != spans.size()) throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); @@ -77,7 +77,7 @@ auto compute_offset(std::vector const& strides, Spans const& spans) * @param[in] spans vector of spans of the subtensor */ template -auto compute_extents(spans_type const& spans) +auto to_extents(spans_type const& spans) { using extents_t = extents<>; using base_type = typename extents_t::base_type; diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index fbb5074db..3a9205480 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -111,6 +111,21 @@ template { } + /** @brief Constructs a tensor_core with a \c shape and initial value + * + * @code auto t = tensor(extents<>{4,3,2},5); @endcode + * + * @param i initial tensor_core with this value + */ + inline tensor_core (extents_type e, value_type i) + : tensor_expression_type{} + , _extents(std::move(e)) + , _strides(to_strides(_extents,layout_type{})) + , _container(product(_extents)) + { + std::fill(begin(),end(),i); + } + /** @brief Constructs a tensor_core with a \c shape and initiates it with one-dimensional data * * @code auto t = tensor(extents<>{3,4,2},std::vector(3*4*2,1.f)); @endcode diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 9ee07cd49..ffeb7173c 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,12 +32,9 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : 
-<<<<<<< HEAD [ run test_access.cpp test_algorithms.cpp test_einstein_notation.cpp - test_subtensor.cpp - test_subtensor_utility.cpp test_expression.cpp test_expression_evaluation.cpp test_extents_dynamic.cpp @@ -65,6 +62,8 @@ test-suite boost-ublas-tensor-test test_static_tensor.cpp test_static_tensor_matrix_vector.cpp test_strides.cpp + test_subtensor.cpp + test_subtensor_utility.cpp test_tensor.cpp test_tensor_matrix_vector.cpp unit_test_framework diff --git a/test/tensor/test_functions.cpp b/test/tensor/test_functions.cpp index 920fdac9a..4e8e4f464 100644 --- a/test/tensor/test_functions.cpp +++ b/test/tensor/test_functions.cpp @@ -1,4 +1,4 @@ -// +// // Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com // Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com // @@ -25,127 +25,126 @@ BOOST_AUTO_TEST_SUITE ( test_tensor_functions) -using test_types = zip>::with_t; +using test_types = zip::with_t; +// int,float,std::complex //using test_types = zip::with_t; struct fixture { - using dynamic_extents_type = boost::numeric::ublas::extents<>; - fixture() - : extents { - dynamic_extents_type{1,1}, // 1 - dynamic_extents_type{2,3}, // 2 - dynamic_extents_type{2,3,1}, // 3 - dynamic_extents_type{4,2,3}, // 4 - dynamic_extents_type{4,2,3,5}} // 5 - { - } + using shape = boost::numeric::ublas::extents<>; - std::vector extents; + const std::vector extents + { + shape{1,1}, // 1 + shape{2,1}, // 2 + shape{1,2}, // 3 + shape{2,3}, // 4 + shape{2,3,1}, // 5 + shape{1,2,3}, // 6 + shape{3,1,2}, // 7 + shape{4,2,3}, // 8 + shape{4,2,3,1}, // 9 + shape{4,2,3,5} // 10 + }; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_vector, pair, test_types, fixture ) { - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor_dynamic; - using vector_type = 
typename tensor_type::vector_type; - - - for(auto const& n : extents){ + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; - auto a = tensor_type(n, value_type{2}); - std::cout << "a=" << a << std::endl; + for(auto const& n : extents){ - for(auto m = 0u; m < ublas::size(n); ++m){ + auto v = value(2); + auto a = tensor(n, v); - auto b = vector_type (n[m], value_type{1} ); + for(auto m = 0u; m < ublas::size(n); ++m){ - std::cout << "b=" << tensor_type(b) << std::endl; + auto b = vector (n[m], value{1} ); - std::cout << "m=" << m << std::endl; + auto c = ublas::prod(a, b, m+1); - auto c = ublas::prod(a, b, m+1); + auto vv = v * value(n[m]); - std::cout << "c=" << tensor_type(c) << std::endl; + BOOST_CHECK( std::all_of( c.begin(), c.end() , [vv](auto cc){ return cc == vv; } ) ); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(n[m]) ) * a[i] ); - - } } - auto n = extents[4]; - auto a = tensor_type(n, value_type{2}); - auto b = vector_type(n[0], value_type{1}); + } +// auto n = extents[4]; +// auto a = tensor_type(n, value_type{2}); +// auto b = vector_type(n[0], value_type{1}); - auto empty = vector_type{}; +// auto empty = vector_type{}; - BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); - BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); - BOOST_CHECK_THROW(prod(a, empty, 2), std::length_error); +// BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); +// BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); +// BOOST_CHECK_THROW(prod(a, empty, 2), std::length_error); } BOOST_AUTO_TEST_CASE( test_tensor_prod_vector_exception ) { -// namespace ublas = boost::numeric::ublas; -// using value_type = float; -// using layout_type = ublas::layout::first_order; -// using d_tensor_type = ublas::tensor_dynamic; -// using vector_type = typename 
d_tensor_type::vector_type; + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; + using tensor = ublas::tensor_dynamic; + using vector = typename tensor::vector_type; -// auto t1 = d_tensor_type{ublas::extents<>{},1.f}; -// auto v1 = vector_type{3,value_type{1}}; + auto t1 = tensor{ublas::extents<>{},1.f}; + auto v1 = vector{3,value{1}}; -// BOOST_REQUIRE_THROW(prod(t1,v1,0),std::length_error); -// BOOST_REQUIRE_THROW(prod(t1,v1,1),std::length_error); -// BOOST_REQUIRE_THROW(prod(t1,v1,3),std::length_error); + BOOST_REQUIRE_THROW(prod(t1,v1,0),std::length_error); + BOOST_REQUIRE_THROW(prod(t1,v1,1),std::length_error); + BOOST_REQUIRE_THROW(prod(t1,v1,3),std::length_error); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_matrix, pair, test_types, fixture ) { - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor_dynamic; - using matrix_type = typename tensor_type::matrix_type; + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + using tensor = ublas::tensor_dynamic; + using matrix = typename tensor::matrix_type; - for(auto const& n : extents) { + for(auto const& n : extents) { - auto a = tensor_type(n, value_type{2}); + auto v = value{2}; + auto a = tensor(n, v); - for(auto m = 0u; m < ublas::size(n); ++m){ + for(auto m = 0u; m < ublas::size(n); ++m){ - auto b = matrix_type ( n[m], n[m], value_type{1} ); + auto b = matrix ( n[m], n[m], value{1} ); - auto c = ublas::prod(a, b, m+1); + auto c = ublas::prod(a, b, m+1); - for(auto i = 0u; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , value_type( static_cast< inner_type_t >(n[m]) ) * a[i] ); + auto vv = v * value(n[m]); + BOOST_CHECK( std::all_of( c.begin(), c.end() , 
[vv](auto cc){ return cc == vv; } ) ); - } } + } - auto n = extents[4]; - auto a = tensor_type(n, value_type{2}); - auto b = matrix_type(n[0], n[0], value_type{1}); + // auto n = extents[4]; + // auto a = tensor_type(n, value_type{2}); + // auto b = matrix_type(n[0], n[0], value_type{1}); - auto empty = matrix_type{}; + // auto empty = matrix_type{}; - BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); - BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); - BOOST_CHECK_THROW(prod(a, empty, 2), std::invalid_argument); + // BOOST_CHECK_THROW(prod(a, b, 0), std::length_error); + // BOOST_CHECK_THROW(prod(a, b, 9), std::length_error); + // BOOST_CHECK_THROW(prod(a, empty, 2), std::invalid_argument); } BOOST_AUTO_TEST_CASE( test_tensor_prod_matrix_exception ) @@ -196,8 +195,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_1, value, test_types, for(auto i = 0ul; i < q; ++i) acc *= value_type( static_cast< inner_type_t >( a.extents().at(phi.at(i)-1) ) ); - for(auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , acc * a[0] * b[0] ); + auto vv = acc * a[0] * b[0]; + BOOST_CHECK( std::all_of(c.begin(), c.end() , [vv](auto cc){ return cc == vv; } ) ); } } @@ -296,8 +295,13 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_prod_tensor_2, value, test_types, for(auto i = 0ul; i < q; ++i) acc *= value_type( static_cast< inner_type_t >( a.extents().at(phia.at(i)-1) ) ); - for(auto i = 0ul; i < c.size(); ++i) - BOOST_CHECK_EQUAL( c[i] , acc * a[0] * b[0] ); + + auto vv = acc * a[0] * b[0]; + BOOST_CHECK( std::all_of(c.begin(), c.end() , [vv](auto cc){ return cc == vv; } ) ); + + +// for(auto i = 0ul; i < c.size(); ++i) +// BOOST_CHECK_EQUAL( c[i] , acc * a[0] * b[0] ); } diff --git a/test/tensor/test_multiplication.cpp b/test/tensor/test_multiplication.cpp index c5ed51e5f..0f9700915 100644 --- a/test/tensor/test_multiplication.cpp +++ b/test/tensor/test_multiplication.cpp @@ -25,7 +25,9 @@ BOOST_AUTO_TEST_SUITE (test_tensor_contraction) -using test_types = 
zip>::with_t; +//using test_types = zip>::with_t; + +using test_types = zip::with_t; //using test_types = zip::with_t; @@ -62,38 +64,38 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(test_tensor_mtv, value, test_types, fixture ) for(auto const& na : extents) { - if(ublas::size(na) > 2) + if(ublas::is_scalar(na) || ublas::is_vector(na) || ublas::is_tensor(na)) continue; + auto const n1 = na[0]; + auto const n2 = na[1]; + auto a = vector_t(ublas::product(na), value_t{2}); auto wa = ublas::to_strides(na,layout_t{}); - for(auto m = std::size_t{0}; m < ublas::size(na); ++m){ + + for(auto m = std::size_t{0}; m < 2; ++m){ + auto nb = extents_t {na[m],std::size_t{1}}; auto wb = ublas::to_strides(nb,layout_t{}); auto b = vector_t (ublas::product(nb), value_t{1} ); - auto nc_base = extents_base_t(std::max(std::size_t{ublas::size(na)-1u}, std::size_t{2}), 1); + // [n1 n2 1 ... 1] x1 [n1 1] -> [n2 1 ... 1] + // [n1 n2 1 ... 1] x2 [n2 1] -> [n1 1 ... 1] - for(auto i = 0ul, j = 0ul; i < ublas::size(na); ++i) - if(i != m) - nc_base[j++] = na[i]; + auto nc_base = extents_base_t(std::max(std::size_t(ublas::size(na)-1u), std::size_t{2}), 1); + nc_base[0] = m==0 ? 
n2 : n1; auto nc = extents_t (nc_base); auto wc = ublas::to_strides(nc,layout_t{}); auto c = vector_t (ublas::product(nc), value_t{0}); - ublas::detail::recursive::mtv( - m, - c.data(), nc.data(), wc.data(), - a.data(), na.data(), wa.data(), - b.data()); + assert( (m==0u) || (m==1u)); + + ublas::detail::recursive::mtv(m, n1,n2, c.data(), size_t(1), a.data(), wa[0], wa[1], b.data(), size_t(1)); auto v = value_t(na[m]); BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); -// for(auto i = 0u; i < c.size(); ++i) -// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[m]) ) * a[i] ); - } } } @@ -132,10 +134,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_mtm, value, test_types, fixture ) auto v = value_t(na[1])*a[0]; BOOST_CHECK(std::all_of(c.begin(),c.end(), [v](auto cc){return cc == v;})); -// for(auto i = 0u; i < c.size(); ++i) -// BOOST_CHECK_EQUAL( c[i] , value_t( static_cast< inner_type_t >(na[1]) ) * a[0] ); - - } } @@ -176,8 +174,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ttv, value, test_types, fixture ) auto v = value_t(na[m]); BOOST_CHECK(std::equal(c.begin(),c.end(),a.begin(), [v](auto cc, auto aa){return cc == v*aa;})); -// for(auto i = 0u; i < c.size(); ++i) -// BOOST_CHECK_EQUAL( c[i] , value_t(na[m]) * a[i] ); } } diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index f3db7334d..d091f4375 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -128,8 +128,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) auto A = tensor_type{1,2}; auto Asub = subtensor_type( A, 0, 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); diff 
--git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index b96fb5b57..c1fab9fa1 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -293,12 +293,12 @@ BOOST_FIXTURE_TEST_CASE( extents_test, fixture_span_vector_shape ) { namespace ublas = boost::numeric::ublas; - BOOST_CHECK ( std::equal( ublas::begin(std::get<0>(reference_)), ublas::begin(std::get<0>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<0>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<1>(reference_)), ublas::begin(std::get<1>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<1>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<2>(reference_)), ublas::begin(std::get<2>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<2>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<3>(reference_)), ublas::begin(std::get<3>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<3>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<4>(reference_)), ublas::begin(std::get<4>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<4>(span_vectors_) ) ) ) ); - BOOST_CHECK ( std::equal( ublas::begin(std::get<5>(reference_)), ublas::begin(std::get<5>(reference_)), ublas::begin(ublas::detail::compute_extents( std::get<5>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<0>(reference_)), ublas::begin(std::get<0>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<0>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<1>(reference_)), ublas::begin(std::get<1>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<1>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<2>(reference_)), ublas::begin(std::get<2>(reference_)), ublas::begin(ublas::detail::to_extents( 
std::get<2>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<3>(reference_)), ublas::begin(std::get<3>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<3>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<4>(reference_)), ublas::begin(std::get<4>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<4>(span_vectors_) ) ) ) ); + BOOST_CHECK ( std::equal( ublas::begin(std::get<5>(reference_)), ublas::begin(std::get<5>(reference_)), ublas::begin(ublas::detail::to_extents( std::get<5>(span_vectors_) ) ) ) ); } @@ -314,35 +314,35 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ { auto s = std::get<0>(span_vectors_); auto w = ublas::to_strides( std::get<0>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<1>(span_vectors_); auto w = ublas::to_strides( std::get<1>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<2>(span_vectors_); auto w = ublas::to_strides( std::get<2>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, 0 ); } { auto s = std::get<3>(span_vectors_); auto w = ublas::to_strides( std::get<3>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] ); } { auto s = std::get<4>(span_vectors_); auto w = ublas::to_strides( std::get<4>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] ); } @@ -350,7 +350,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ { auto s = 
std::get<5>(span_vectors_); auto w = ublas::to_strides( std::get<5>(extents_), layout{} ); - auto o = ublas::detail::compute_offset(w,s); + auto o = ublas::detail::to_offset(w,s); BOOST_CHECK_EQUAL( o, s[0].first()*w[0] + s[1].first()*w[1] + s[2].first()*w[2] + s[3].first()*w[3] ); } From bd0f53335e6d18be531e9e4f56056b6ecb45adf2 Mon Sep 17 00:00:00 2001 From: "kannav.mehta" Date: Wed, 16 Jun 2021 22:28:39 +0530 Subject: [PATCH 09/40] experimental: initial subtensor_engine --- examples/tensor/Jamfile | 3 +- examples/tensor/subtensor.cpp | 22 +++ .../boost/numeric/ublas/tensor/expression.hpp | 2 +- include/boost/numeric/ublas/tensor/span.hpp | 131 ++++-------------- include/boost/numeric/ublas/tensor/tags.hpp | 2 + include/boost/numeric/ublas/tensor/tensor.hpp | 2 +- .../numeric/ublas/tensor/tensor/subtensor.hpp | 102 ++++++++++++++ .../ublas/tensor/tensor/subtensor_engine.hpp | 27 ++++ .../ublas/tensor/tensor/tensor_dynamic.hpp | 5 +- .../ublas/tensor/tensor/tensor_engine.hpp | 2 +- .../ublas/tensor/tensor/tensor_static.hpp | 5 +- .../tensor/tensor/tensor_static_rank.hpp | 7 +- .../ublas/tensor/traits/read_write_traits.hpp | 30 ++++ .../slice_traits.hpp} | 3 +- 14 files changed, 223 insertions(+), 120 deletions(-) create mode 100644 examples/tensor/subtensor.cpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/subtensor.hpp create mode 100644 include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp create mode 100644 include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp rename include/boost/numeric/ublas/tensor/{slice_detail/type_traits_slice.hpp => traits/slice_traits.hpp} (91%) diff --git a/examples/tensor/Jamfile b/examples/tensor/Jamfile index 101a8a4c9..511c677e4 100644 --- a/examples/tensor/Jamfile +++ b/examples/tensor/Jamfile @@ -7,7 +7,7 @@ # http://www.boost.org/LICENSE_1_0.txt) import ../../../../config/checks/config : requires ; - + # Project settings project boost-ublas-tensor-example : requirements @@ -24,3 +24,4 @@ exe 
simple_expressions : simple_expressions.cpp ; exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; exe instantiate_tensor : instantiate_tensor.cpp ; +exe subtensor : subtensor.cpp ; diff --git a/examples/tensor/subtensor.cpp b/examples/tensor/subtensor.cpp new file mode 100644 index 000000000..c557b2da2 --- /dev/null +++ b/examples/tensor/subtensor.cpp @@ -0,0 +1,22 @@ +#include + +using namespace boost::numeric::ublas; + +int main() { + { + const auto ts = tensor_dynamic<>(); + auto sts = make_subtensor(ts); + auto sts_sts = make_subtensor(sts); + auto sts_sts_sts = make_subtensor(sts_sts); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + } + { + auto ts = tensor_dynamic<>(); + auto sts = make_subtensor(ts); + auto sts_sts = make_subtensor(sts); + auto sts_sts_sts = make_subtensor(sts_sts); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + } +} diff --git a/include/boost/numeric/ublas/tensor/expression.hpp b/include/boost/numeric/ublas/tensor/expression.hpp index 47d534010..f3eee178d 100644 --- a/include/boost/numeric/ublas/tensor/expression.hpp +++ b/include/boost/numeric/ublas/tensor/expression.hpp @@ -20,7 +20,7 @@ namespace boost::numeric::ublas::detail { -/** @\brief base class for tensor expressions +/** @brief base class for tensor expressions * * \note implements crtp - no use of virtual function calls * diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 581e16dc6..2f9178f55 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -21,42 +21,26 @@ #include "concepts.hpp" -namespace boost::numeric::ublas::tag{ - -struct sliced {}; -struct strided {}; - -} // namespace boost::numeric::ublas::tag - namespace boost::numeric::ublas { /** \class span - * \ingroup Core_Module - * - * \brief Selection operator 
class to initialize stl::multi_subarray - * - * This class is used to generate stl::multi_subarray from stl::multi_array and to - * work on views. - * \note zero based indexing is used. - * - */ - - - -//using offsets = std::vector; - -template -class span; - + * \ingroup Core_Module + * + * \brief Selection operator class to initialize stl::multi_subarray + * + * This class is used to generate stl::multi_subarray from stl::multi_array and to + * work on views. + * \note zero based indexing is used. + * + */ static constexpr inline std::size_t max = std::numeric_limits::max(); -template<> -class span +template +class span { public: - using span_tag = tag::strided; using value_type = std::size_t; // covers the complete range of one dimension @@ -94,8 +78,8 @@ class span // covers only one index of one dimension // e.g. a(1) or a(end) - span(value_type n) - : span(n,1,n) + span(value_type f, value_type l) + : span(f,1,l) { } @@ -142,107 +126,44 @@ class span value_type first_, last_ , step_, size_; }; -using strided_span = span; - -} // namespace - - -///////////// - -namespace boost::numeric::ublas { - -template<> -class span : - private span -{ - using super_type = span; -public: - using span_tag = tag::sliced; - using value_type = typename super_type::value_type; - constexpr explicit span() - : super_type() - { - } - - span(value_type f, value_type l) - : super_type(f, value_type(1), l ) - { - } - - span(value_type n) - : super_type(n) - { - } - - span(span const& other) - : super_type(other) - { - } - - inline span& operator=(const span &other) - { - super_type::operator=(other); - return *this; - } - - ~span() = default; - - inline value_type operator[] (std::size_t idx) const - { - return super_type::operator [](idx); - } - - inline auto first() const {return super_type::first(); } - inline auto last () const {return super_type::last (); } - inline auto step () const {return super_type::step (); } - inline auto size () const {return super_type::size (); } - - 
inline span operator()(const span &rhs) const - { - auto const& lhs = *this; - return span( rhs.first_ + lhs.first_, rhs.last_ + lhs.first_ ); - } -}; - -using sliced_span = span; -template -inline auto ran(unsigned_type_left f, unsigned_type_right l) +template +inline auto ran(unsigned_type_lhs f, unsigned_type_rhs l) { - return sliced_span(f,l); + return span(f,l); } template inline auto ran(unsigned_type_left f, unsigned_type_middle s, unsigned_type_right l) { - return strided_span(f,s,l); + return span(f,s,l); } } // namespace -template -std::ostream& operator<< (std::ostream& out, boost::numeric::ublas::span const& s) +template +std::ostream& operator<< (std::ostream& out, boost::numeric::ublas::span const& s) { return out << "[" << s.first() << ":" << s.step() << ":" << s.last() << "]" << std::endl; } -template +template inline bool operator==( - boost::numeric::ublas::span const& lhs, - boost::numeric::ublas::span const& rhs) + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) { return lhs.first() == rhs.first() && lhs.last() == rhs.last() && lhs.step() == rhs.step(); } -template +template inline bool operator!=( - boost::numeric::ublas::span const& lhs, - boost::numeric::ublas::span const& rhs) + boost::numeric::ublas::span const& lhs, + boost::numeric::ublas::span const& rhs) { - return lhs.first() != rhs.first() || lhs.last() != rhs.last() || lhs.step() != rhs.step(); + return !(lhs == rhs); } #endif // FHG_range_H diff --git a/include/boost/numeric/ublas/tensor/tags.hpp b/include/boost/numeric/ublas/tensor/tags.hpp index 711941e18..e67ab039c 100644 --- a/include/boost/numeric/ublas/tensor/tags.hpp +++ b/include/boost/numeric/ublas/tensor/tags.hpp @@ -16,6 +16,8 @@ struct storage_resizable_container_tag{}; struct storage_static_container_tag{}; struct storage_seq_container_tag{}; struct storage_non_seq_container_tag{}; +struct read_write_tag{}; +struct read_only_tag{}; } // namespace boost::numeric::ublas diff --git 
a/include/boost/numeric/ublas/tensor/tensor.hpp b/include/boost/numeric/ublas/tensor/tensor.hpp index 9d194fe7b..c56e5e981 100644 --- a/include/boost/numeric/ublas/tensor/tensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor.hpp @@ -18,6 +18,6 @@ #include "tensor/tensor_engine.hpp" #include "tensor/tensor_static_rank.hpp" #include "tensor/tensor_static.hpp" +#include "tensor/subtensor.hpp" #endif // BOOST_UBLAS_TENSOR_TENSOR_HPP - diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp new file mode 100644 index 000000000..7edbb3e5c --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -0,0 +1,102 @@ +// +// Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +//: + + +/// \file subtensor.hpp Definition for the subtensor template class + +#ifndef BOOST_UBLAS_SUBTENSOR_HPP +#define BOOST_UBLAS_SUBTENSOR_HPP + +#include "../algorithms.hpp" +#include "../concepts.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../span.hpp" +#include "../tags.hpp" +#include "../traits/read_write_traits.hpp" +#include "../type_traits.hpp" +#include "subtensor_engine.hpp" +#include "tensor_engine.hpp" + +#include + +namespace boost::numeric::ublas { + +template +class tensor_core> + : public detail::tensor_expression>, + tensor_core>> { +public: + using engine_type = subtensor_engine; + using self_type = tensor_core; + + template + using tensor_expression_type = + detail::tensor_expression; + + template struct subtensor_iterator { + }; + + static constexpr bool is_const = + std::is_const>::value; + + using container_type = typename engine_type::container_type; + using 
layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = + std::conditional_t; + using const_reference = typename container_traits_type::const_reference; + + using pointer = + std::conditional_t; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename self_type::subtensor_iterator; + using const_iterator = + typename self_type::subtensor_iterator const; + + using reverse_iterator = typename container_traits_type::reverse_iterator; + using const_reverse_iterator = + typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using span_type = span; + + tensor_core(const tensor_core& tens) {} + + template tensor_core(const tensor_core& tens) {} +}; + +template using subtensor = tensor_core>; + +template auto make_subtensor(const T& tens) +{ + return subtensor(tens); +} + +} // namespace boost::numeric::ublas + +#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp new file mode 100644 index 000000000..39c60db23 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp @@ -0,0 +1,27 @@ +// +// Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef BOOST_UBLAS_SUBTENSOR_ENGINE_HPP +#define BOOST_UBLAS_SUBTENSOR_ENGINE_HPP + + +namespace boost::numeric::ublas +{ + +template +struct subtensor_engine +{ + using engine_type = typename T::engine_type; // reference to the parent engine + using extents_type = typename T::extents_type; // reference to the parent extents + using layout_type = typename T::layout_type; // reference to the parent layout + using container_type = typename T::container_type; // reference to the parent container +}; + +} // namespace boost::numeric::ublas + +#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index ec27296a6..277c86bd2 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -210,7 +210,7 @@ template * @note extents are automatically extracted from the temporary matrix * * @param expr matrix expression - */ + */ template // NOLINTNEXTLINE(hicpp-explicit-conversions) inline tensor_core (const matrix_expression_type &expr) @@ -226,7 +226,7 @@ template * @note extents are automatically extracted from the temporary matrix * * @param expr vector expression - */ + */ template // NOLINTNEXTLINE(hicpp-explicit-conversions) inline tensor_core (const vector_expression_type &expr) @@ -463,4 +463,3 @@ using tensor_dynamic = tensor_core, L, std::vector>>; #endif - diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp index dc6cbd790..280e7b47f 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp @@ -20,7 +20,7 @@ struct tensor_engine { using extents_type = E; using layout_type = L; - using container_type = C; + using container_type = C; }; 
} // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp index 644ed9c51..910892241 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static.hpp @@ -192,7 +192,7 @@ class tensor_core> * @note extents are automatically extracted from the temporary matrix * * @param expr matrix expression - */ + */ template // NOLINTNEXTLINE(hicpp-explicit-conversions) inline tensor_core (const matrix_expression_type &expr) @@ -208,7 +208,7 @@ class tensor_core> * @note extents are automatically extracted from the temporary matrix * * @param expr vector expression - */ + */ template // NOLINTNEXTLINE(hicpp-explicit-conversions) inline tensor_core (const vector_expression_type &expr) @@ -453,4 +453,3 @@ using vector_static = tensor_static, L>; } // namespace boost::numeric::ublas::experimental #endif - diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index 3a9205480..adf39d52b 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -156,7 +156,7 @@ template , _extents (ublas::begin(other.extents()),ublas::end (other.extents ())) , _strides (ublas::to_strides(_extents)) , _container(std::begin(other.container()),std::end (other.container())) - { + { } @@ -207,7 +207,7 @@ template * @note extents are automatically extracted from the temporary matrix * * @param expr matrix expression - */ + */ template // NOLINTNEXTLINE(hicpp-explicit-conversions) inline tensor_core (const matrix_expression_type &expr) @@ -223,7 +223,7 @@ template * @note extents are automatically extracted from the temporary matrix * * @param expr vector expression - */ + */ template // NOLINTNEXTLINE(hicpp-explicit-conversions) inline 
tensor_core (const vector_expression_type &expr) @@ -485,4 +485,3 @@ using vector = tensor_core>; #endif // BOOST_UBLAS_TENSOR_STATIC_RANK_HPP - diff --git a/include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp b/include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp new file mode 100644 index 000000000..a9a456eed --- /dev/null +++ b/include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp @@ -0,0 +1,30 @@ +// +// Copyright (c) 2020, Kannav Mehta, kmkannavkmehta@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// + +#ifndef BOOST_UBLAS_TENSOR_READ_WRITE_TYPE_TRAITS_HPP +#define BOOST_UBLAS_TENSOR_READ_WRITE_TYPE_TRAITS_HPP + +#include +#include + +#include "../tags.hpp" + +namespace boost::numeric::ublas::detail { + +template +struct is_read_write : std::false_type {}; + +template<> +struct is_read_write : std::true_type {}; + +template +inline static constexpr bool is_read_write_v = is_read_write::value; + +} // namespace boost::numeric::ublas + +#endif diff --git a/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp b/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp similarity index 91% rename from include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp rename to include/boost/numeric/ublas/tensor/traits/slice_traits.hpp index 54f39b792..9db44a8ad 100644 --- a/include/boost/numeric/ublas/tensor/slice_detail/type_traits_slice.hpp +++ b/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp @@ -14,7 +14,8 @@ #define _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ #include -#include +#include +#include namespace boost::numeric::ublas::experimental { From 74acfee54983e7537c207bd7a3079192a1e6aeeb Mon Sep 17 00:00:00 2001 From: "kannav.mehta" Date: Fri, 30 Jul 2021 18:36:48 +0530 Subject: [PATCH 10/40] add subtensor implementation --- examples/tensor/subtensor.cpp | 20 +- 
.../boost/numeric/ublas/tensor/subtensor.hpp | 677 ------------------ .../numeric/ublas/tensor/tensor/subtensor.hpp | 79 +- .../ublas/tensor/tensor/subtensor_engine.hpp | 9 +- .../tensor/{ => tensor}/subtensor_utility.hpp | 32 +- .../ublas/tensor/tensor/tensor_dynamic.hpp | 24 +- test/tensor/test_subtensor.cpp | 1 - test/tensor/test_subtensor_utility.cpp | 2 +- 8 files changed, 101 insertions(+), 743 deletions(-) delete mode 100644 include/boost/numeric/ublas/tensor/subtensor.hpp rename include/boost/numeric/ublas/tensor/{ => tensor}/subtensor_utility.hpp (85%) diff --git a/examples/tensor/subtensor.cpp b/examples/tensor/subtensor.cpp index c557b2da2..4ae31615a 100644 --- a/examples/tensor/subtensor.cpp +++ b/examples/tensor/subtensor.cpp @@ -3,20 +3,8 @@ using namespace boost::numeric::ublas; int main() { - { - const auto ts = tensor_dynamic<>(); - auto sts = make_subtensor(ts); - auto sts_sts = make_subtensor(sts); - auto sts_sts_sts = make_subtensor(sts_sts); - static_assert(std::is_same_v); - static_assert(std::is_same_v); - } - { - auto ts = tensor_dynamic<>(); - auto sts = make_subtensor(ts); - auto sts_sts = make_subtensor(sts); - auto sts_sts_sts = make_subtensor(sts_sts); - static_assert(std::is_same_v); - static_assert(std::is_same_v); - } + const auto ts = tensor_dynamic<>(); + auto sts = ts(span(1,2), span(2,3)); + // auto sts_sts = sts(span(1,2), span(2,3)); + // auto sts_sts_sts = sts_sts(span(1,2), span(2,3)); } diff --git a/include/boost/numeric/ublas/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/subtensor.hpp deleted file mode 100644 index a119300b7..000000000 --- a/include/boost/numeric/ublas/tensor/subtensor.hpp +++ /dev/null @@ -1,677 +0,0 @@ -// Copyright (c) 2020, Cem Bassoy, cem.bassoy@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Fraunhofer and Google in producing this work -// which firsted as a Google Summer of Code project. -// - - -/// \file subtensor.hpp Definition for the tensor template class - -#ifndef BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP -#define BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_HPP - - -#include "tensor.hpp" -#include "subtensor_utility.hpp" -#include "extents.hpp" -#include "span.hpp" -#include "expression.hpp" - - -namespace boost::numeric::ublas { - - - -/** @brief A view of a dense tensor of values of type \c T. - * - * @tparam T type of the objects stored in the tensor (like int, double, complex,...) - * @tparam F - * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. \c and \c std::vector can also be used -*/ -template -class subtensor; - - -/** @brief A sliced view of a dense tensor of values of type \c T. - * - * For a \f$n\f$-dimensional tensor \f$v\f$ and \f$0\leq i < n\f$ every element \f$v_i\f$ is mapped - * to the \f$i\f$-th element of the container. A storage type \c A can be specified which defaults to \c unbounded_array. - * Elements are constructed by \c A, which need not initialise their value. - * - * @tparam T type of the objects stored in the tensor (like int, double, complex,...) - * @tparam F type of the layout which can be either - * @tparam A The type of the storage array of the tensor. Default is \c unbounded_array. 
\c and \c std::vector can also be used - */ -template -class subtensor > - : public detail::tensor_expression< - subtensor> , - subtensor> > -{ - - static_assert( std::is_same::value || std::is_same::value, - "boost::numeric::tensor template class only supports first- or last-order storage formats."); - - using tensor_type = tensor_dynamic; - using self_type = subtensor; -public: - - using domain_tag = tag::sliced; - - using span_type = span; - - template - using tensor_expression_type = detail::tensor_expression; - - template - using matrix_expression_type = matrix_expression; - - template - using vector_expression_type = vector_expression; - - using super_type = tensor_expression_type; - - // static_assert(std::is_same_v, detail::tensor_expression,tensor>>, "tensor_expression_type"); - - using container_type = typename tensor_type::container_type; - using layout_type = typename tensor_type::layout_type; - - using size_type = typename tensor_type::size_type; - using difference_type = typename tensor_type::difference_type; - using value_type = typename tensor_type::value_type; - - using reference = typename tensor_type::reference; - using const_reference = typename tensor_type::const_reference; - - using pointer = typename tensor_type::pointer; - using const_pointer = typename tensor_type::const_pointer; - - // using iterator = typename array_type::iterator; - // using const_iterator = typename array_type::const_iterator; - - // using reverse_iterator = typename array_type::reverse_iterator; - // using const_reverse_iterator = typename array_type::const_reverse_iterator; - - using tensor_temporary_type = self_type; - using storage_category = dense_tag; - - using extents_type = extents<>; - using strides_type = typename extents_type::base_type; - - using matrix_type = matrix; - using vector_type = vector; - - - - /** @brief Deleted constructor of a subtensor */ - subtensor () = delete; - - /** @brief Constructs a tensor view from a tensor without any range. 
- * - */ - BOOST_UBLAS_INLINE - subtensor (tensor_type& t) - : super_type () - , spans_ () - , extents_ (t.extents()) - , strides_ (t.strides()) - , span_strides_ (t.strides()) - , data_ (t.data()) - { - } - - template - subtensor(tensor_type& t, span_types&& ... spans) - : super_type () - , spans_ (detail::generate_span_vector(t.extents(),std::forward(spans)...)) - , extents_ (detail::to_extents(spans_)) - , strides_ (ublas::to_strides(extents_,layout_type{})) - , span_strides_ (detail::to_span_strides(t.strides(),spans_)) - , data_ {t.data() + detail::to_offset(t.strides(), spans_)} - { -// if( m == nullptr) -// throw std::length_error("Error in tensor_view::tensor_view : multi_array_type is nullptr."); -// if( t == nullptr) -// throw std::length_error("Error in tensor_view::tensor_view : tensor_type is nullptr."); - } - - - /** @brief Constructs a tensor view from a tensor without any range. - * - * @note is similar to a handle to a tensor - */ - explicit - subtensor (tensor_type const& t) - : super_type () - , spans_ () - , extents_ (t.extents()) - , strides_ (t.strides()) - , span_strides_ (t.strides()) - , data_ (t.data()) - { - } - - - - /** @brief Constructs a tensor from another tensor - * - * @param v tensor to be copied. - */ - inline - subtensor (const subtensor &v) - : super_type () - , spans_ (v.spans_) - , extents_ (v.extents_) - , strides_ (v.strides_) - , span_strides_ (v.span_strides_) - , data_ (v.data_) - {} - - - /** @brief Constructs a tensor from another tensor - * - * @param v tensor to be moved. - */ - BOOST_UBLAS_INLINE - subtensor (subtensor &&v) - : super_type () - , spans_ (std::move(v.spans_)) - , extents_ (std::move(v.extents_)) - , strides_ (std::move(v.strides_)) - , span_strides_ (std::move(v.span_strides_)) - , data_ (std::move(v.data_)) - {} - -#if 0 - - /** @brief Constructs a tensor with a matrix - * - * \note Initially the tensor will be two-dimensional. - * - * @param v matrix to be copied. 
- */ - BOOST_UBLAS_INLINE - tensor (const matrix_type &v) - : tensor_expression_type() - , extents_ () - , strides_ () - , data_ (v.data()) - { - if(!data_.empty()){ - extents_ = extents_type{v.size1(),v.size2()}; - strides_ = strides_type(extents_); - } - } - - /** @brief Constructs a tensor with a matrix - * - * \note Initially the tensor will be two-dimensional. - * - * @param v matrix to be moved. - */ - BOOST_UBLAS_INLINE - tensor (matrix_type &&v) - : tensor_expression_type() - , extents_ {} - , strides_ {} - , data_ {} - { - if(v.size1()*v.size2() != 0){ - extents_ = extents_type{v.size1(),v.size2()}; - strides_ = strides_type(extents_); - data_ = std::move(v.data()); - } - } - - /** @brief Constructs a tensor using a \c vector - * - * @note It is assumed that vector is column vector - * @note Initially the tensor will be one-dimensional. - * - * @param v vector to be copied. - */ - BOOST_UBLAS_INLINE - tensor (const vector_type &v) - : tensor_expression_type() - , extents_ () - , strides_ () - , data_ (v.data()) - { - if(!data_.empty()){ - extents_ = extents_type{data_.size(),1}; - strides_ = strides_type(extents_); - } - } - - /** @brief Constructs a tensor using a \c vector - * - * @param v vector to be moved. - */ - BOOST_UBLAS_INLINE - tensor (vector_type &&v) - : tensor_expression_type() - , extents_ {} - , strides_ {} - , data_ {} - { - if(v.size() != 0){ - extents_ = extents_type{v.size(),1}; - strides_ = strides_type(extents_); - data_ = std::move(v.data()); - } - } - - - /** @brief Constructs a tensor with another tensor with a different layout - * - * @param other tensor with a different layout to be copied. 
- */ - BOOST_UBLAS_INLINE - template - tensor (const tensor &other) - : tensor_expression_type () - , extents_ (other.extents()) - , strides_ (other.extents()) - , data_ (other.extents().product()) - { - copy(this->rank(), this->extents().data(), - this->data(), this->strides().data(), - other.data(), other.strides().data()); - } - - /** @brief Constructs a tensor with an tensor expression - * - * @code tensor A = B + 3 * C; @endcode - * - * @note type must be specified of tensor must be specified. - * @note dimension extents are extracted from tensors within the expression. - * - * @param expr tensor expression - */ - BOOST_UBLAS_INLINE - template - tensor (const tensor_expression_type &expr) - : tensor_expression_type () - , extents_ ( detail::retrieve_extents(expr) ) - , strides_ ( extents_ ) - , data_ ( extents_.product() ) - { - static_assert( detail::has_tensor_types>::value, - "Error in boost::numeric::ublas::tensor: expression does not contain a tensor. cannot retrieve shape."); - detail::eval( *this, expr ); - } - - /** @brief Constructs a tensor with a matrix expression - * - * @code tensor A = B + 3 * C; @endcode - * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. - * @note extents are automatically extracted from the temporary matrix - * - * @param expr matrix expression - */ - BOOST_UBLAS_INLINE - template - tensor (const matrix_expression_type &expr) - : tensor( matrix_type ( expr ) ) - { - } - - /** @brief Constructs a tensor with a vector expression - * - * @code tensor A = b + 3 * b; @endcode - * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. 
- * @note extents are automatically extracted from the temporary matrix - * - * @param expr vector expression - */ - BOOST_UBLAS_INLINE - template - tensor (const vector_expression_type &expr) - : tensor( vector_type ( expr ) ) - { - } - - /** @brief Evaluates the tensor_expression and assigns the results to the tensor - * - * @code A = B + C * 2; @endcode - * - * @note rank and dimension extents of the tensors in the expressions must conform with this tensor. - * - * @param expr expression that is evaluated. - */ - BOOST_UBLAS_INLINE - template - tensor &operator = (const tensor_expression_type &expr) - { - detail::eval(*this, expr); - return *this; - } - - tensor& operator=(tensor other) - { - swap (*this, other); - return *this; - } - - tensor& operator=(const_reference v) - { - std::fill(this->begin(), this->end(), v); - return *this; - } -#endif - - -// /** @brief Returns true if the subtensor is empty (\c size==0) */ -// inline bool empty () const { -// return this->size() == 0ul; -// } - - -// /** @brief Returns the size of the subtensor */ -// inline size_type size () const { -// return product(this->extents_); -// } - -// /** @brief Returns the size of the subtensor */ -// inline size_type size (size_type r) const { -// return this->extents_.at(r); -// } - -// /** @brief Returns the number of dimensions/modes of the subtensor */ -// inline size_type rank () const { -// return this->extents_.size(); -// } - -// /** @brief Returns the number of dimensions/modes of the subtensor */ -// inline size_type order () const { -// return this->extents_.size(); -// } - -// /** @brief Returns the strides of the subtensor */ -// inline auto const& strides () const { -// return this->strides_; -// } - - /** @brief Returns the span strides of the subtensor */ - inline auto const& span_strides () const { - return this->span_strides_; - } - - /** @brief Returns the span strides of the subtensor */ - inline auto const& spans () const { - return this->spans_; - } - - -// /** 
@brief Returns the extents of the subtensor */ -// inline auto const& extents() const { -// return this->extents_; -// } - - - [[nodiscard]] inline auto empty () const noexcept { return this->size() == 0ul; } - [[nodiscard]] inline auto size () const noexcept { return product(this->extents_); } - [[nodiscard]] inline auto size (size_type r) const { return extents_.at(r); } - [[nodiscard]] inline auto rank () const { return extents_.size(); } - [[nodiscard]] inline auto order () const { return this->rank(); } - - [[nodiscard]] inline auto const& strides () const noexcept { return strides_; } - [[nodiscard]] inline auto const& getExtents () const noexcept { return extents_; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return data_;} - [[nodiscard]] inline auto data () noexcept -> pointer { return data_;} -// [[nodiscard]] inline auto const& base () const noexcept { return _container; } - - -// /** @brief Returns a \c const reference to the container. */ -// inline const_pointer data() const { -// return this->data_; -// } - -// /** @brief Returns a \c const reference to the container. */ -// inline pointer data () { -// return this->data_; -// } - - - - - - /** @brief Element access using a single index. - * - * @code auto a = A[i]; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - inline const_reference operator [] (size_type i) const { - return this->data_[i]; - } - - /** @brief Element access using a single index. - * - * - * @code A[i] = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - inline reference operator [] (size_type i) - { - return this->data_[i]; - } - -#if 0 - /** @brief Element access using a multi-index or single-index. 
- * - * - * @code auto a = A.at(i,j,k); @endcode or - * @code auto a = A.at(i); @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - template - BOOST_UBLAS_INLINE - const_reference at (size_type i, size_types ... is) const { - if constexpr (sizeof...(is) == 0) - return this->data_[i]; - else - return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward(is)...)]; - } - - /** @brief Element access using a multi-index or single-index. - * - * - * @code A.at(i,j,k) = a; @endcode or - * @code A.at(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == 0, else 0<= i < this->size(0) - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - BOOST_UBLAS_INLINE - template - reference at (size_type i, size_types ... is) { - if constexpr (sizeof...(is) == 0) - return this->data_[i]; - else - return this->data_[detail::access<0ul>(size_type(0),this->strides_,i,std::forward(is)...)]; - } - - - - - /** @brief Element access using a single index. - * - * - * @code A(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - BOOST_UBLAS_INLINE - const_reference operator()(size_type i) const { - return this->data_[i]; - } - - - /** @brief Element access using a single index. 
- * - * @code A(i) = a; @endcode - * - * @param i zero-based index where 0 <= i < this->size() - */ - BOOST_UBLAS_INLINE - reference operator()(size_type i){ - return this->data_[i]; - } - - - - - /** @brief Generates a tensor index for tensor contraction - * - * - * @code auto Ai = A(_i,_j,k); @endcode - * - * @param i placeholder - * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r < this->rank() - */ - BOOST_UBLAS_INLINE - template - decltype(auto) operator() (index::index_type p, index_types ... ps) const - { - constexpr auto N = sizeof...(ps)+1; - if( N != this->rank() ) - throw std::runtime_error("Error in boost::numeric::ublas::operator(): size of provided index_types does not match with the rank."); - - return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); - } - - - - - - /** @brief Reshapes the tensor - * - * - * (1) @code A.reshape(extents{m,n,o}); @endcode or - * (2) @code A.reshape(extents{m,n,o},4); @endcode - * - * If the size of this smaller than the specified extents than - * default constructed (1) or specified (2) value is appended. - * - * @note rank of the tensor might also change. - * - * @param e extents with which the tensor is reshaped. - * @param v value which is appended if the tensor is enlarged. 
- */ - BOOST_UBLAS_INLINE - void reshape (extents_type const& e, value_type v = value_type{}) - { - this->extents_ = e; - this->strides_ = strides_type(this->extents_); - - if(e.product() != this->size()) - this->data_.resize (this->extents_.product(), v); - } - - - friend void swap(tensor& lhs, tensor& rhs) { - std::swap(lhs.data_ , rhs.data_ ); - std::swap(lhs.extents_, rhs.extents_); - std::swap(lhs.strides_, rhs.strides_); - } - - - /// \brief return an iterator on the first element of the tensor - BOOST_UBLAS_INLINE - const_iterator begin () const { - return data_.begin (); - } - - /// \brief return an iterator on the first element of the tensor - BOOST_UBLAS_INLINE - const_iterator cbegin () const { - return data_.cbegin (); - } - - /// \brief return an iterator after the last element of the tensor - BOOST_UBLAS_INLINE - const_iterator end () const { - return data_.end(); - } - - /// \brief return an iterator after the last element of the tensor - BOOST_UBLAS_INLINE - const_iterator cend () const { - return data_.cend (); - } - - /// \brief Return an iterator on the first element of the tensor - BOOST_UBLAS_INLINE - iterator begin () { - return data_.begin(); - } - - /// \brief Return an iterator at the end of the tensor - BOOST_UBLAS_INLINE - iterator end () { - return data_.end(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) - BOOST_UBLAS_INLINE - const_reverse_iterator rbegin () const { - return data_.rbegin(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) - BOOST_UBLAS_INLINE - const_reverse_iterator crbegin () const { - return data_.crbegin(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. 
first element of the normal tensor) - BOOST_UBLAS_INLINE - const_reverse_iterator rend () const { - return data_.rend(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) - BOOST_UBLAS_INLINE - const_reverse_iterator crend () const { - return data_.crend(); - } - - /// \brief Return a const reverse iterator before the first element of the reversed tensor (i.e. end() of normal tensor) - BOOST_UBLAS_INLINE - reverse_iterator rbegin () { - return data_.rbegin(); - } - - /// \brief Return a const reverse iterator on the end of the reverse tensor (i.e. first element of the normal tensor) - BOOST_UBLAS_INLINE - reverse_iterator rend () { - return data_.rend(); - } - - -#endif - -private: - - std::vector spans_; - extents_type extents_; - strides_type strides_; - strides_type span_strides_; - pointer data_; -}; - - -} // namespaces boost::numeric::ublas - -#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index 7edbb3e5c..a2ec75d3b 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -24,8 +24,11 @@ #include "../tags.hpp" #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" -#include "subtensor_engine.hpp" + #include "tensor_engine.hpp" +#include "subtensor_engine.hpp" +#include "subtensor_utility.hpp" + #include @@ -40,14 +43,12 @@ class tensor_core> using self_type = tensor_core; template - using tensor_expression_type = - detail::tensor_expression; + using tensor_expression_type = detail::tensor_expression; template struct subtensor_iterator { }; - static constexpr bool is_const = - std::is_const>::value; + static constexpr bool is_const = std::is_const>::value; using container_type = typename engine_type::container_type; using layout_type = typename engine_type::layout_type; @@ -60,41 +61,77 @@ class 
tensor_core> using difference_type = typename container_traits_type::difference_type; using value_type = typename container_traits_type::value_type; - using reference = - std::conditional_t; using const_reference = typename container_traits_type::const_reference; - using pointer = - std::conditional_t; using const_pointer = typename container_traits_type::const_pointer; using iterator = typename self_type::subtensor_iterator; - using const_iterator = - typename self_type::subtensor_iterator const; + using const_iterator = typename self_type::subtensor_iterator const; using reverse_iterator = typename container_traits_type::reverse_iterator; - using const_reverse_iterator = - typename container_traits_type::const_reverse_iterator; + using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; using container_tag = typename container_traits_type::container_tag; using resizable_tag = typename container_traits_type::resizable_tag; using span_type = span; - tensor_core(const tensor_core& tens) {} - - template tensor_core(const tensor_core& tens) {} + using subtensor_type = self_type; + + tensor_core(const tensor_core&) = default; + + template + tensor_core(T&& t, FS&& first_span, SL&& ... spans) + : _spans(detail::generate_span_vector(t.extents(), std::forward(first_span), std::forward(spans)...)), + _extents(detail::compute_extents(t.extents(), )), // TODO compute extents + _strides(t.strides()), + _data(t.data() + detail::to_offset(t.strides())) + { + _spans.resize(1 + sizeof(spans)...); + } + + /** + * @brief Generates a subtensor from a tensor + * + * @tparam f + * @tparam spans + */ + template + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) const noexcept { + return subtensor_type(_data, _extents, _strides, std::forward(s), std::forward(spans)...); + } + + template + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) noexcept { + return subtensor_type(_data, _extents, _strides, std::forward(s), std::forward(spans)...); + } + +private: + + template<,class FS, class ... SL> + tensor_core(pointer_type data, const extents_type& extents, const strides_type& strides, FS&& first_span, SL&& ... spans) noexcept + : _spans(detail::generate_span_vector(extents, std::forward(first_span), std::forward(spans)...)), + _data(data), + _extents(extents), + _strides(strides) + { + } + + std::vector _spans; + extents_type _extents; + strides_type _strides; + pointer _data; }; -template using subtensor = tensor_core>; - -template auto make_subtensor(const T& tens) -{ - return subtensor(tens); +template +decltype(auto) subtensor2(T&& t, FS&& first_span, SL&& ... spans) { + return tensor_core>(std::forward(t), std::forward(first_span), std::forward(spans)...); } } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp index 39c60db23..0a0994ce8 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp @@ -16,10 +16,11 @@ namespace boost::numeric::ublas template struct subtensor_engine { - using engine_type = typename T::engine_type; // reference to the parent engine - using extents_type = typename T::extents_type; // reference to the parent extents - using layout_type = typename T::layout_type; // reference to the parent layout - using container_type = typename T::container_type; // reference to the parent container + using tensor_type = std::decay_t; + using engine_type = typename tensor_type::engine_type; // reference to the parent engine + using extents_type = typename tensor_type::extents_type; // reference to the parent extents + using layout_type = typename tensor_type::layout_type; // reference to the parent layout + using container_type = typename tensor_type::container_type; // 
reference to the parent container }; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp similarity index 85% rename from include/boost/numeric/ublas/tensor/subtensor_utility.hpp rename to include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp index 4b203c138..fdd6fa0ce 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp @@ -103,9 +103,9 @@ auto to_extents(spans_type const& spans) * @param[in] extent extent that is maybe used for the tranformation */ template -auto transform_span(span const& s, std::size_t const extent) +auto transform_span(span const& s, std::size_t const extent) { - using span_type = span; + using span_type = span; std::size_t first = s.first(); std::size_t last = s.last (); @@ -113,22 +113,11 @@ auto transform_span(span const& s, std::size_t const extent auto const extent0 = extent-1; - auto constexpr is_sliced = std::is_same::value; - - - if constexpr ( is_sliced ){ - if(size == 0) return span_type(0 , extent0); - else if(first== max) return span_type(extent0 , extent0); - else if(last == max) return span_type(first , extent0); - else return span_type(first , last ); - } - else { - size_type step = s.step (); - if(size == 0) return span_type(0 , size_type(1), extent0); - else if(first== max) return span_type(extent0 , step, extent0); - else if(last == max) return span_type(first , step, extent0); - else return span_type(first , step, last ); - } + size_type step = s.step (); + if(size == 0) return span_type(0 , size_type(1), extent0); + else if(first== max) return span_type(extent0 , step, extent0); + else if(last == max) return span_type(first , step, extent0); + else return span_type(first , step, last ); return span_type{}; } @@ -136,8 +125,8 @@ auto transform_span(span const& s, std::size_t const extent template void 
transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); -template -void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) +template +void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) { std::get(span_array) = transform_span(s, extents[r]); static constexpr auto nspans = sizeof...(spans); @@ -180,10 +169,9 @@ auto generate_span_array(extents<> const& extents, Spans&& ... spans) return span_array; } - /*! @brief Auxiliary function for subtensor that generates array of spans * - * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * generate_span_vector(shape(4,3,5,2), span(), 1, span(2,end), end ) * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) * * @note span is zero-based indexed. diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 277c86bd2..712aff3b2 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -27,7 +27,8 @@ #include "../type_traits.hpp" #include "../tags.hpp" #include "../concepts.hpp" - +#include "../span.hpp" +#include "subtensor.hpp" #include "tensor_engine.hpp" @@ -84,6 +85,8 @@ template using matrix_type = matrix >; using vector_type = vector >; + using span_type = span; + explicit tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape @@ -415,6 +418,24 @@ template return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); } + + /** + * @brief Generates a subtensor from a tensor + * + * @tparam f + * @tparam spans + */ + template + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) const noexcept { + return subtensor(*this, std::forward(s), std::forward(spans)...); + } + + template + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) noexcept { + return subtensor(*this, std::forward(s), std::forward(spans)...); + } + + friend void swap(tensor_core& lhs, tensor_core& rhs) { std::swap(lhs._extents , rhs._extents); @@ -449,6 +470,7 @@ template [[nodiscard]] inline auto const& base () const noexcept { return _container; } private: + extents_type _extents; strides_type _strides; container_type _container; diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index d091f4375..ce01fedbb 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -17,7 +17,6 @@ #include #include -#include #include diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index c1fab9fa1..8b6181161 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -14,7 +14,7 @@ #include #include "utility.hpp" -#include +#include #include #include #include From 5284adbdc71edb402c874cbe3698b208a3e4c18e Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Tue, 3 Aug 2021 00:17:54 +0530 Subject: [PATCH 11/40] add Memeber functions --- .../numeric/ublas/tensor/tensor/subtensor.hpp | 386 +++++++++++++----- .../ublas/tensor/tensor/subtensor_utility.hpp | 10 +- .../ublas/tensor/tensor/tensor_dynamic.hpp | 9 +- 3 files changed, 303 insertions(+), 102 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index a2ec75d3b..2ed99924e 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -24,11 +24,9 @@ #include "../tags.hpp" #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" - -#include "tensor_engine.hpp" #include "subtensor_engine.hpp" #include 
"subtensor_utility.hpp" - +#include "tensor_engine.hpp" #include @@ -39,100 +37,302 @@ class tensor_core> : public detail::tensor_expression>, tensor_core>> { public: - using engine_type = subtensor_engine; - using self_type = tensor_core; - - template - using tensor_expression_type = detail::tensor_expression; - - template struct subtensor_iterator { - }; - - static constexpr bool is_const = std::is_const>::value; - - using container_type = typename engine_type::container_type; - using layout_type = typename engine_type::layout_type; - using extents_type = typename engine_type::extents_type; - using strides_type = typename extents_type::base_type; - - using container_traits_type = container_traits; - - using size_type = typename container_traits_type::size_type; - using difference_type = typename container_traits_type::difference_type; - using value_type = typename container_traits_type::value_type; - - using reference = std::conditional_t; - using const_reference = typename container_traits_type::const_reference; - - using pointer = std::conditional_t; - using const_pointer = typename container_traits_type::const_pointer; - - using iterator = typename self_type::subtensor_iterator; - using const_iterator = typename self_type::subtensor_iterator const; - - using reverse_iterator = typename container_traits_type::reverse_iterator; - using const_reverse_iterator = typename container_traits_type::const_reverse_iterator; - - using container_tag = typename container_traits_type::container_tag; - using resizable_tag = typename container_traits_type::resizable_tag; - - using span_type = span; - - using subtensor_type = self_type; - - tensor_core(const tensor_core&) = default; - - template - tensor_core(T&& t, FS&& first_span, SL&& ... 
spans) - : _spans(detail::generate_span_vector(t.extents(), std::forward(first_span), std::forward(spans)...)), - _extents(detail::compute_extents(t.extents(), )), // TODO compute extents - _strides(t.strides()), - _data(t.data() + detail::to_offset(t.strides())) - { - _spans.resize(1 + sizeof(spans)...); - } - - /** - * @brief Generates a subtensor from a tensor - * - * @tparam f - * @tparam spans - */ - template - [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) const noexcept { - return subtensor_type(_data, _extents, _strides, std::forward(s), std::forward(spans)...); - } - - template - [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) noexcept { - return subtensor_type(_data, _extents, _strides, std::forward(s), std::forward(spans)...); - } + using engine_type = subtensor_engine; + using self_type = tensor_core; + + template + using tensor_expression_type = + detail::tensor_expression; + + template struct subtensor_iterator { + }; + + static constexpr bool is_const = + std::is_const>::value; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = std::conditional_t; + using const_reference = typename container_traits_type::const_reference; + + using pointer = std::conditional_t; + using const_pointer = typename container_traits_type::const_pointer; + + using iterator = typename self_type::subtensor_iterator; + using const_iterator = + typename self_type::subtensor_iterator const; + + using reverse_iterator = typename 
container_traits_type::reverse_iterator; + using const_reverse_iterator = + typename container_traits_type::const_reverse_iterator; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using span_type = span; + + using subtensor_type = self_type; + + explicit tensor_core() = delete; + + tensor_core(const tensor_core&) = default; + + template + tensor_core(U&& t, FS&& first, SL&&... spans) + : _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) + , _extents(detail::compute_extents(t.extents(), std::forward(first), std::forward(spans)...)) + , _strides(t.strides()) + , _tensor(t) + { + _spans.resize(1 + sizeof(spans)...); + } + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the + * tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must + * conform with this tensor_core. + * + * @param expr expression that is evaluated. + */ + template + tensor_core& operator=(const tensor_expression_type& expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + tensor_core& operator=(tensor_core other) noexcept + { + swap(*this, other); + return *this; + } + + // tensor_core& operator=(const_reference v) + // { + // std::fill_n(_container.begin(), _container.size(), v); + // return *this; + // } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. 
+ * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline const_reference at(I1 i1, I2 i2, Is... is) const + { + if (sizeof...(is) + 2 != this->order()) { + throw std::invalid_argument( + "boost::numeric::ublas::tensor_core::at : " + "Cannot access tensor with multi-index. " + "Number of provided indices does not match with tensor order."); + } + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); + return _tensor.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline reference at(I1 i1, I2 i2, Is... is) + { + if (sizeof...(is) + 2 != this->order()) { + throw std::invalid_argument( + "boost::numeric::ublas::tensor_core::at : " + "Cannot access tensor with multi-index." + "Number of provided indices does not match with tensor order."); + } + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); + return _tensor.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline const_reference operator()(Is... 
is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template [[nodiscard]] inline reference operator()(Is... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference operator[](size_type i) const + { + return this->_tensor[i]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline reference operator[](size_type i) + { + return this->_tensor[i]; + } + + /** @brief Element access using a single-index with bound checking which can + * throw an exception. + * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + template + [[nodiscard]] inline const_reference at(size_type i) const + { + return this->_container.at(i); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline reference at(size_type i) + { + return this->_container.at(i); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline decltype(auto) operator()(index::index_type p, index_types... 
ps) const + { + constexpr auto size = sizeof...(ps) + 1; + if (size != this->order()) { + throw std::invalid_argument( + "boost::numeric::ublas::tensor_core : " + "Cannot multiply using Einstein notation. " + "Number of provided indices does not match with tensor order."); + } + return std::make_pair(std::cref(*this), std::make_tuple(p, std::forward(ps)...)); + } + + /** + * @brief Generates a subtensor from a tensor + * + * @code auto Ai = A(span(), span(1,end), span(1,end)); @endcode + * + * @tparam f + * @tparam spans + */ + template + [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) const noexcept + { + return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + } + + template + [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) noexcept + { + return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + } + + [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } + [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } + [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } + [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } + [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } + [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } + [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } + [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } + [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } + [[nodiscard]] inline auto rbegin () noexcept -> 
reverse_iterator { return _container.rbegin (); } + [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return _container.empty(); } + [[nodiscard]] inline auto size () const noexcept { return _container.size(); } + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline auto rank () const { return _extents.size(); } + [[nodiscard]] inline auto order () const { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _container.data();} + [[nodiscard]] inline auto data () noexcept -> pointer { return _container.data();} + [[nodiscard]] inline auto const& base () const noexcept { return _container; } private: - - template<,class FS, class ... SL> - tensor_core(pointer_type data, const extents_type& extents, const strides_type& strides, FS&& first_span, SL&& ... spans) noexcept - : _spans(detail::generate_span_vector(extents, std::forward(first_span), std::forward(spans)...)), - _data(data), - _extents(extents), - _strides(strides) - { - } - - std::vector _spans; - extents_type _extents; - strides_type _strides; - pointer _data; + /** + * @brief There might be cases where spans cannot be computed on creation + */ + std::vector _spans; + extents_type _extents; + strides_type _strides; + T& _tensor; }; -template -decltype(auto) subtensor2(T&& t, FS&& first_span, SL&& ... 
spans) { - return tensor_core>(std::forward(t), std::forward(first_span), std::forward(spans)...); -} } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp index fdd6fa0ce..0e2f8af4f 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp @@ -15,13 +15,13 @@ #ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ #define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ +#include "../extents.hpp" +#include "../span.hpp" +#include "../tags.hpp" + #include -#include #include - -#include "span.hpp" -#include "extents.hpp" -#include "tags.hpp" +#include namespace boost::numeric::ublas::detail { diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 712aff3b2..10f7e30c6 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -87,6 +87,8 @@ template using span_type = span; + using subtesnor_type = tensor_core>; + explicit tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape @@ -281,8 +283,8 @@ template } // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) - tensor_core& operator=(tensor_core other) noexcept { + tensor_core& operator=(tensor_core other) noexcept swap (*this, other); return *this; } @@ -427,15 +429,14 @@ template */ template [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) const noexcept { - return subtensor(*this, std::forward(s), std::forward(spans)...); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } template [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) noexcept { - return subtensor(*this, std::forward(s), std::forward(spans)...); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } - friend void swap(tensor_core& lhs, tensor_core& rhs) { std::swap(lhs._extents , rhs._extents); From e948da0f0c40f348bd6175680f632654fdb71481 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sun, 8 Aug 2021 23:11:35 +0530 Subject: [PATCH 12/40] Decoupled span from tensor, negative span --- include/boost/numeric/ublas/tensor/span.hpp | 76 +++++----- test/tensor/test_subtensor_utility.cpp | 151 ++++++-------------- 2 files changed, 77 insertions(+), 150 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 2f9178f55..93d27826c 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -35,59 +35,55 @@ namespace boost::numeric::ublas { * */ -static constexpr inline std::size_t max = std::numeric_limits::max(); +static constexpr inline std::size_t max = std::numeric_limits::max(); +static constexpr inline std::size_t min = std::numeric_limits::min(); -template +template class span { public: - using value_type = std::size_t; + using value_type = std::ptrdiff_t; // covers the complete range of one dimension // e.g. a(:) constexpr explicit span() : first_{} - , last_ {} - , step_ {} - , size_ {} - {} + , last_ {max} + , step_ {1} + { + } + + // covers only one index of one dimension + // e.g. a(1) or a(0) + // TODO: case where stop < 0 then stop += length + span(value_type l) + : span(0,1,l) + { + } + // covers only one index of one dimension + // e.g. a(1:3) or a(0:end) + span(value_type f, value_type l) + : span(f,1,l) + { + } // covers a linear range of one dimension // e.g. 
a(1:3:n) span(value_type f, value_type s, value_type l) : first_(f) - , last_ (l) , step_ (s) + , last_(l) { - if(f == l){ - last_ = l; - size_ = value_type(1); - } - else { - if(s == 0 && f != l) - throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); - - if(f > l) - throw std::runtime_error("Error in span::span: last_ is smaller than first"); - - last_ = l - ((l-f)%s); - size_ = (last_-first_)/s+value_type(1); - } - } + if(s == 0 && f != l) + throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); - // covers only one index of one dimension - // e.g. a(1) or a(end) - span(value_type f, value_type l) - : span(f,1,l) - { } - span(span const& other) + span(span const& other) : first_(other.first_) , last_ (other.last_ ) , step_ (other.step_ ) - , size_ (other.size_ ) { } @@ -96,38 +92,34 @@ class span first_ = other.first_; last_ = other.last_ ; step_ = other.step_ ; - size_ = other.size_ ; return *this; } - inline auto first() const {return first_; } - inline auto last () const {return last_ ; } - inline auto step () const {return step_ ; } - inline auto size () const {return size_ ; } + inline auto first() const {return first_; } + inline auto last () const {return last_ ; } + inline auto step () const {return step_ ; } ~span() = default; - inline value_type operator[] (std::size_t idx) const + inline value_type operator[] (std::size_t idx) const { return first_ + idx * step_; } - inline span operator()(const span &rhs) const + inline span operator()(const span &rhs) const { auto const& lhs = *this; return span( - rhs.first_*lhs.step_ + lhs.first_, + rhs.first_*lhs.step_ + lhs.first_, lhs.step_ *rhs.step_, rhs.last_ *lhs.step_ + lhs.first_ ); } protected: - value_type first_, last_ , step_, size_; + value_type first_, last_ , step_; }; - - template inline auto ran(unsigned_type_lhs f, unsigned_type_rhs l) { @@ -166,4 +158,4 @@ inline bool operator!=( return !(lhs == rhs); } -#endif // FHG_range_H 
+#endif // BOOST_UBLAS_TENSOR SPAN diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index 8b6181161..c4f7fb2d9 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -14,130 +14,67 @@ #include #include "utility.hpp" -#include #include #include #include - +#include BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) +struct fixture_span { + using span_type = boost::numeric::ublas::span; -struct fixture_sliced_span { - using span_type = boost::numeric::ublas::sliced_span; - - fixture_sliced_span() + fixture_span() : spans{ - span_type(), // 0, a(:) - span_type(0,0), // 1, a(0:0) - span_type(0,2), // 2, a(0:2) - span_type(1,1), // 3, a(1:1) - span_type(1,3), // 4, a(1:3) - span_type(1,boost::numeric::ublas::max), // 5, a(1:end) - span_type(boost::numeric::ublas::max) // 6, a(end) - } - {} - std::vector spans; -}; - - -BOOST_FIXTURE_TEST_CASE( transform_sliced_span_test, fixture_sliced_span ) -{ - - namespace ublas = boost::numeric::ublas; - -// template - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::sliced_span(0,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::sliced_span(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::sliced_span(0,3) ); - - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::sliced_span(0,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::sliced_span(0,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::sliced_span(0,0) ); - - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::sliced_span(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::sliced_span(0,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == 
ublas::sliced_span(0,2) ); - - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::sliced_span(1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::sliced_span(1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::sliced_span(1,1) ); - - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::sliced_span(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::sliced_span(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::sliced_span(1,3) ); - - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::sliced_span(1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::sliced_span(1,4) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::sliced_span(1,5) ); - - - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::sliced_span(3,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::sliced_span(4,4) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::sliced_span(5,5) ); -} - - -struct fixture_strided_span { - using span_type = boost::numeric::ublas::strided_span; - - fixture_strided_span() - : spans{ - span_type(), // 0, a(:) - span_type(0,1,0), // 1, a(0:1:0) - span_type(0,2,2), // 2, a(0:2:2) - span_type(1,1,1), // 3, a(1:1:1) - span_type(1,1,3), // 4, a(1:1:3) - span_type(1,2,boost::numeric::ublas::max), // 5, a(1:2:end) - span_type(boost::numeric::ublas::max) // 6, a(end) - } + span_type(), // 0, a(:) + span_type(0,1,0), // 1, a(0:1:0) + span_type(0,2,2), // 2, a(0:2:2) + span_type(1,1,1), // 3, a(1:1:1) + span_type(1,1,3), // 4, a(1:1:3) + span_type(1,2,boost::numeric::ublas::max), // 5, a(1:2:end) + span_type(boost::numeric::ublas::max) // 6, a(end) + } 
{} std::vector spans; }; -BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_strided_span ) +BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_span ) { - using namespace boost::numeric; // template - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::strided_span(0,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::strided_span(0,1,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::strided_span(0,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::span(0,1,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(2) ) == ublas::span(0,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(4) ) == ublas::span(0,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::strided_span(0,1,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::strided_span(0,1,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::strided_span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(2) ) == ublas::span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(3) ) == ublas::span(0,1,0) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(1), std::size_t(4) ) == ublas::span(0,1,0) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::strided_span(0,2,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::strided_span(0,2,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::strided_span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(3) ) == ublas::span(0,2,2) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(4) ) == ublas::span(0,2,2) ); + 
BOOST_CHECK( ublas::detail::transform_span(spans.at(2), std::size_t(5) ) == ublas::span(0,2,2) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::strided_span(1,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::strided_span(1,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::strided_span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(2) ) == ublas::span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(3) ) == ublas::span(1,1,1) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(3), std::size_t(4) ) == ublas::span(1,1,1) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::strided_span(1,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::strided_span(1,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::strided_span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(4) ) == ublas::span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(5) ) == ublas::span(1,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(4), std::size_t(6) ) == ublas::span(1,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::strided_span(1,2,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::strided_span(1,2,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::strided_span(1,2,5) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(4) ) == ublas::span(1,2,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(5) ) == ublas::span(1,2,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(5), std::size_t(6) ) == ublas::span(1,2,5) ); - BOOST_CHECK( 
ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::strided_span(3,1,3) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::strided_span(4,1,4) ); - BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::strided_span(5,1,5) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(4) ) == ublas::span(3,1,3) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(5) ) == ublas::span(4,1,4) ); + BOOST_CHECK( ublas::detail::transform_span(spans.at(6), std::size_t(6) ) == ublas::span(5,1,5) ); } - - - - - struct fixture_shape { using shape = boost::numeric::ublas::extents<>; @@ -160,7 +97,7 @@ struct fixture_shape { BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) { namespace ublas = boost::numeric::ublas; - using span = ublas::sliced_span; + using span = ublas::span; // shape{} { @@ -180,9 +117,9 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) // shape{1,1} { auto v = ublas::detail::generate_span_array(extents[1],ublas::max,span(ublas::max)); - auto r = std::vector{span(0,0),span(0,0)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } + auto r = std::vector{span(0,0),span(0,0)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); + } // shape{1,1} { @@ -196,13 +133,13 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) auto v = ublas::detail::generate_span_array(extents[2],0,ublas::max); auto r = std::vector{span(0,0),span(1,1)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); - } + } // shape{1,2} { - auto v = ublas::detail::generate_span_array(extents[2],0,1); - auto r = std::vector{span(0,0),span(1,1)}; - BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return 
l == r; } ) ); + auto v = ublas::detail::generate_span_array(extents[2],0,1); + auto r = std::vector{span(0,0),span(1,1)}; + BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } { @@ -242,7 +179,7 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) struct fixture_span_vector_shape { using shape = boost::numeric::ublas::extents<>; - using span = boost::numeric::ublas::sliced_span; + using span = boost::numeric::ublas::span; fixture_span_vector_shape() @@ -359,8 +296,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( offset_test, layout, test_types, fixture_span_ #if 0 - - BOOST_FIXTURE_TEST_CASE_TEMPLATE( span_strides_test, layout, test_types, fixture_span_vector_shape ) { From dc5c90469b645977b366b26e396691e25c2c98d2 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sun, 15 Aug 2021 21:17:49 +0530 Subject: [PATCH 13/40] Fix bugs #51 #52 --- examples/tensor/subtensor.cpp | 28 +++++++-- include/boost/numeric/ublas/tensor/span.hpp | 18 ++++-- .../numeric/ublas/tensor/tensor/subtensor.hpp | 60 ++++++++++--------- .../ublas/tensor/tensor/subtensor_utility.hpp | 7 +-- .../ublas/tensor/tensor/tensor_dynamic.hpp | 4 +- 5 files changed, 73 insertions(+), 44 deletions(-) diff --git a/examples/tensor/subtensor.cpp b/examples/tensor/subtensor.cpp index 4ae31615a..261663446 100644 --- a/examples/tensor/subtensor.cpp +++ b/examples/tensor/subtensor.cpp @@ -2,9 +2,29 @@ using namespace boost::numeric::ublas; +void instantiate_subtensor_dynamic() { + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_dynamic; + constexpr auto ones = ublas::ones{}; + + try { + tensor t1 = ones(3, 4, 2); + std::cout << "t1 = " << t1 << std::endl; + + auto st1 = t1(span(1,ublas::max), span(), span(0,1)); + + std::cout << "st1 = " << st1 << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought 
exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." << std::endl; + throw; + } +} + + int main() { - const auto ts = tensor_dynamic<>(); - auto sts = ts(span(1,2), span(2,3)); - // auto sts_sts = sts(span(1,2), span(2,3)); - // auto sts_sts_sts = sts_sts(span(1,2), span(2,3)); + instantiate_subtensor_dynamic(); } diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 93d27826c..9e475b1a4 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -36,20 +36,19 @@ namespace boost::numeric::ublas { */ static constexpr inline std::size_t max = std::numeric_limits::max(); -static constexpr inline std::size_t min = std::numeric_limits::min(); -template +template class span { public: - using value_type = std::ptrdiff_t; + using value_type = std::size_t; // covers the complete range of one dimension // e.g. a(:) constexpr explicit span() : first_{} - , last_ {max} , step_ {1} + , last_ {max} { } @@ -73,7 +72,7 @@ class span span(value_type f, value_type s, value_type l) : first_(f) , step_ (s) - , last_(l) + , last_ (l) { if(s == 0 && f != l) throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); @@ -115,9 +114,16 @@ class span rhs.last_ *lhs.step_ + lhs.first_ ); } + inline value_type size() const { + if (first_ == last_) { + return value_type(1); + } + return (last_ - first_) / step_; + } + protected: - value_type first_, last_ , step_; + value_type first_, step_, last_ ; }; template diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index 2ed99924e..15756ba64 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -12,6 +12,7 @@ #ifndef BOOST_UBLAS_SUBTENSOR_HPP #define BOOST_UBLAS_SUBTENSOR_HPP +#include "../access.hpp" #include 
"../algorithms.hpp" #include "../concepts.hpp" #include "../expression.hpp" @@ -93,11 +94,11 @@ class tensor_core> template tensor_core(U&& t, FS&& first, SL&&... spans) : _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) - , _extents(detail::compute_extents(t.extents(), std::forward(first), std::forward(spans)...)) + , _extents{} , _strides(t.strides()) , _tensor(t) { - _spans.resize(1 + sizeof(spans)...); + _extents = detail::to_extents(_spans); } /// @brief Default destructor @@ -218,7 +219,8 @@ class tensor_core> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - return this->_tensor[i]; + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor[idx]; } /** @brief Element access using a single index. @@ -229,7 +231,8 @@ class tensor_core> */ [[nodiscard]] inline reference operator[](size_type i) { - return this->_tensor[i]; + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor[idx]; } /** @brief Element access using a single-index with bound checking which can @@ -242,7 +245,9 @@ class tensor_core> template [[nodiscard]] inline const_reference at(size_type i) const { - return this->_container.at(i); + + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor.at(idx); } /** @brief Read tensor element of a tensor \c t with a single-index \c i @@ -253,7 +258,8 @@ class tensor_core> */ [[nodiscard]] inline reference at(size_type i) { - return this->_container.at(i); + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor.at(idx); } /** @brief Generates a tensor_core index for tensor_core contraction @@ -298,30 +304,30 @@ class tensor_core> return 
subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); } - [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } - [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } - [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } - [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } - [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } - [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } - [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } - [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } - [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } - [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } - [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } - [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } - - [[nodiscard]] inline auto empty () const noexcept { return _container.empty(); } - [[nodiscard]] inline auto size () const noexcept { return _container.size(); } - [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } - [[nodiscard]] inline auto rank () const { return _extents.size(); } - [[nodiscard]] inline auto order () const { return this->rank(); } +// [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } +// [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } +// [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } +// [[nodiscard]] 
inline auto end () noexcept -> iterator { return _container.end (); } +// [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } +// [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } +// [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } +// [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } +// [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } +// [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } +// [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } +// [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return size() == 0; } + [[nodiscard]] inline auto size () const noexcept { return ublas::product(_extents);} + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline auto rank () const { return _extents.size(); } + [[nodiscard]] inline auto order () const { return this->rank(); } [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _container.data();} - [[nodiscard]] inline auto data () noexcept -> pointer { return _container.data();} - [[nodiscard]] inline auto const& base () const noexcept { return _container; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data();} + [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data();} + [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } 
private: /** diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp index 0e2f8af4f..b33cd80b5 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp @@ -102,20 +102,18 @@ auto to_extents(spans_type const& spans) * @param[in] s span that is going to be transformed * @param[in] extent extent that is maybe used for the tranformation */ -template +template auto transform_span(span const& s, std::size_t const extent) { using span_type = span; std::size_t first = s.first(); std::size_t last = s.last (); - std::size_t size = s.size (); auto const extent0 = extent-1; size_type step = s.step (); - if(size == 0) return span_type(0 , size_type(1), extent0); - else if(first== max) return span_type(extent0 , step, extent0); + if(first== max) return span_type(extent0 , step, extent0); else if(last == max) return span_type(first , step, extent0); else return span_type(first , step, last ); return span_type{}; @@ -186,7 +184,6 @@ auto generate_span_vector(extents<> const& extents, Spans&& ... 
spans) return std::vector(span_array.begin(), span_array.end()); } - } // namespace boost::numeric::ublas::detail diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 10f7e30c6..bd1dab28e 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -87,7 +87,7 @@ template using span_type = span; - using subtesnor_type = tensor_core>; + using subtensor_type = tensor_core>; explicit tensor_core () = default; @@ -283,8 +283,8 @@ template } // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) - { tensor_core& operator=(tensor_core other) noexcept + { swap (*this, other); return *this; } From 3e6699c4b10c8e8f0714f3e59d4ccd620a547f6a Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Tue, 17 Aug 2021 00:12:33 +0530 Subject: [PATCH 14/40] Revert span to original --- examples/tensor/subtensor.cpp | 10 ++++++++++ include/boost/numeric/ublas/tensor/span.hpp | 6 ++++-- .../boost/numeric/ublas/tensor/tensor/subtensor.hpp | 5 ++++- .../numeric/ublas/tensor/tensor/subtensor_utility.hpp | 9 ++++++--- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/examples/tensor/subtensor.cpp b/examples/tensor/subtensor.cpp index 261663446..a51d56867 100644 --- a/examples/tensor/subtensor.cpp +++ b/examples/tensor/subtensor.cpp @@ -24,6 +24,16 @@ void instantiate_subtensor_dynamic() { } } +void instantiate_tensor_dynamics_with_static_order() +{ +} + + +void instantiate_tensor_static() +{ +} + + int main() { instantiate_subtensor_dynamic(); diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 9e475b1a4..50fda9bd3 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -76,7 +76,9 @@ class span { if(s == 0 && f != l) throw std::runtime_error("Error in span::span : 
cannot have a step_ equal to zero."); - + if(f > l) + throw std::runtime_error("Error in span::span: last_ is smaller than first"); + last_ = l - ((l-f)%s); } span(span const& other) @@ -118,7 +120,7 @@ class span if (first_ == last_) { return value_type(1); } - return (last_ - first_) / step_; + return (last_-first_) / step_ + value_type(1); } protected: diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index 15756ba64..04a1b3662 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -98,7 +98,10 @@ class tensor_core> , _strides(t.strides()) , _tensor(t) { - _extents = detail::to_extents(_spans); + _extents = detail::to_extents(_spans); + for (int i = 0; i < (int) _extents.size(); i++) { + std::cout << _extents[i] << std::endl; + } } /// @brief Default destructor diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp index b33cd80b5..19af629ff 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp @@ -113,9 +113,12 @@ auto transform_span(span const& s, std::size_t const extent) auto const extent0 = extent-1; size_type step = s.step (); - if(first== max) return span_type(extent0 , step, extent0); - else if(last == max) return span_type(first , step, extent0); - else return span_type(first , step, last ); + if(first >= extent) { + return (last >= extent ? 
span_type(extent , step, extent ) : + span_type(extent0, step, extent0)); + } + else if(last >= extent) return span_type(first , step, extent0); + else return span_type(first , step, last ); return span_type{}; } From 1f958aea54c19a3ac7b687bddf59da8333cae897 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Wed, 18 Aug 2021 00:55:35 +0530 Subject: [PATCH 15/40] WIP: Add support for static rank tensors --- examples/tensor/subtensor.cpp | 28 ++++++++++++--- .../ublas/tensor/detail/extents_functions.hpp | 36 +++++++++---------- include/boost/numeric/ublas/tensor/span.hpp | 4 +-- .../tensor/tensor/tensor_static_rank.hpp | 24 +++++++++++++ .../ublas/tensor/traits/slice_traits.hpp | 11 +++--- 5 files changed, 75 insertions(+), 28 deletions(-) diff --git a/examples/tensor/subtensor.cpp b/examples/tensor/subtensor.cpp index a51d56867..3541d2674 100644 --- a/examples/tensor/subtensor.cpp +++ b/examples/tensor/subtensor.cpp @@ -2,12 +2,13 @@ using namespace boost::numeric::ublas; -void instantiate_subtensor_dynamic() { +void instantiate_subtensor_dynamic() +{ namespace ublas = boost::numeric::ublas; using value = float; - using layout = ublas::layout::first_order; // storage format - using tensor = boost::numeric::ublas::tensor_dynamic; - constexpr auto ones = ublas::ones{}; + using layout = boost::numeric::ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_static_rank; + constexpr auto ones = ublas::ones_static_rank{}; try { tensor t1 = ones(3, 4, 2); @@ -26,6 +27,25 @@ void instantiate_subtensor_dynamic() { void instantiate_tensor_dynamics_with_static_order() { + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_dynamic; + constexpr auto ones = ublas::ones{}; + + try { + tensor t1 = ones(3, 4, 2); + std::cout << "t1 = " << t1 << std::endl; + + auto st1 = t1(span(1,ublas::max), span(), span(0,1)); + + std::cout 
<< "st1 = " << st1 << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." << std::endl; + throw; + } } diff --git a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp index 479555e57..eb47adbc1 100644 --- a/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp +++ b/include/boost/numeric/ublas/tensor/detail/extents_functions.hpp @@ -10,11 +10,11 @@ // Google // -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP_ +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP +#define _BOOST_NUMERIC_UBLAS_TENSOR_EXTENTS_FUNCTIONS_HPP #include -#include +#include #include #include #include @@ -58,8 +58,8 @@ if constexpr( sizeof...(E) == 0ul ){ } template -constexpr auto squeeze_impl( basic_static_extents const& e ){ +constexpr auto squeeze_impl( basic_static_extents const& e ){ using extents_type = basic_static_extents; if constexpr( extents_type::_size <= typename extents_type::size_type(2) ){ @@ -160,7 +160,7 @@ template [[nodiscard]] inline constexpr bool valid(ExtentsType const &e) { - + static_assert(is_extents_v, "boost::numeric::ublas::valid() : " "invalid type, type should be an extents"); @@ -183,20 +183,20 @@ template template [[nodiscard]] inline std::string to_string(T const &e) { - + using value_type = typename T::value_type; - static_assert(is_extents_v ||is_strides_v, + static_assert(is_extents_v ||is_strides_v, "boost::numeric::ublas::to_string() : invalid type, type should be an extents or a strides"); if ( e.empty() ) return "[]"; std::stringstream ss; - + ss << "[ "; std::copy( e.begin(), e.end() - 1, std::ostream_iterator(ss,", ") ); - + ss << e.back() << " ]"; return ss.str(); @@ -369,35 +369,35 @@ template constexpr auto product(ExtentsType const &e) { 
static_assert(is_extents_v, "boost::numeric::ublas::product() : invalid type, type should be an extents"); - + if ( e.empty() ) return 0u; else return std::accumulate(e.begin(), e.end(), 1u, std::multiplies<>()) ; } -template && is_extents_v - , int> = 0 + , int> = 0 > [[nodiscard]] inline constexpr bool operator==(LExtents const& lhs, RExtents const& rhs) noexcept{ - - static_assert( std::is_same_v, + + static_assert( std::is_same_v, "boost::numeric::ublas::operator==(LExtents, RExtents) : LHS value type should be same as RHS value type"); return ( lhs.size() == rhs.size() ) && std::equal(lhs.begin(), lhs.end(), rhs.begin()); } -template && is_extents_v - , int> = 0 + , int> = 0 > [[nodiscard]] inline constexpr bool operator!=(LExtents const& lhs, RExtents const& rhs) noexcept{ - - static_assert( std::is_same_v, + + static_assert( std::is_same_v, "boost::numeric::ublas::operator!=(LExtents, RExtents) : LHS value type should be same as RHS value type"); return !( lhs == rhs ); diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 50fda9bd3..c61f0c1ef 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -55,7 +55,7 @@ class span // covers only one index of one dimension // e.g. 
a(1) or a(0) // TODO: case where stop < 0 then stop += length - span(value_type l) + explicit span(value_type l) : span(0,1,l) { } @@ -116,7 +116,7 @@ class span rhs.last_ *lhs.step_ + lhs.first_ ); } - inline value_type size() const { + [[ nodiscard ]] inline value_type size() const { if (first_ == last_) { return value_type(1); } diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index adf39d52b..152d09ba5 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -27,6 +27,9 @@ #include "../type_traits.hpp" #include "../tags.hpp" #include "../concepts.hpp" +#include "../span.hpp" +#include "subtensor.hpp" +#include "tensor_engine.hpp" #include "tensor_engine.hpp" @@ -85,6 +88,9 @@ template using matrix_type = matrix >; using vector_type = vector >; + using span_type = span; + using subtensor_type = tensor_core>; + tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape @@ -410,6 +416,24 @@ template return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); } + /** + * @brief Generates a subtensor from a tensor + * + * @tparam f + * @tparam spans + */ + template + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) const noexcept { + static_assert(sizeof...(spans)+1 == std::tuple_size_v); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); + } + + template + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) noexcept { + static_assert(sizeof...(spans)+1 == std::tuple_size_v); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); + } + friend void swap(tensor_core& lhs, tensor_core& rhs) { std::swap(lhs._extents , rhs._extents ); diff --git a/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp b/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp index 9db44a8ad..158e11d69 100644 --- a/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp +++ b/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp @@ -13,12 +13,14 @@ #ifndef _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ #define _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ +#ifdef 0 + #include #include #include namespace boost::numeric::ublas::experimental { - + template struct basic_slice; @@ -31,21 +33,22 @@ namespace boost::numeric::ublas::experimental { } // namespace boost::numeric::ublas::span namespace boost::numeric::ublas::experimental { - + template struct is_slice< basic_slice > : std::true_type{}; } // namespace boost::numeric::ublas::span namespace boost::numeric::ublas{ - + template struct is_dynamic< experimental::basic_slice > : std::true_type{}; - + template struct is_static< experimental::basic_slice > : std::true_type{}; } // namespace boost::numeric::ublas +#endif #endif From d75b1e8167406e2c49218154daeca78238258c99 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 12:28:59 +0530 Subject: [PATCH 16/40] Improve examples and bug fixes --- examples/tensor/access_subtensor.cpp | 121 +++++++++++ examples/tensor/expressions_subtensor.cpp | 77 +++++++ ...ubtensor.cpp => instantiate_subtensor.cpp} | 17 +- .../numeric/ublas/tensor/dynamic_strides.hpp | 8 +- include/boost/numeric/ublas/tensor/span.hpp | 8 +- .../boost/numeric/ublas/tensor/strides.hpp | 22 +- .../ublas/tensor/subtensor_utility.hpp | 196 ++++++++++++++++++ .../numeric/ublas/tensor/tensor/subtensor.hpp | 26 ++- .../ublas/tensor/tensor/subtensor_engine.hpp | 8 +- 
.../ublas/tensor/tensor/tensor_engine.hpp | 4 +- test/tensor/test_span.cpp | 6 +- test/tensor/test_subtensor.cpp | 2 +- test/tensor/test_subtensor_utility.cpp | 15 +- 13 files changed, 455 insertions(+), 55 deletions(-) create mode 100644 examples/tensor/access_subtensor.cpp create mode 100644 examples/tensor/expressions_subtensor.cpp rename examples/tensor/{subtensor.cpp => instantiate_subtensor.cpp} (73%) create mode 100644 include/boost/numeric/ublas/tensor/subtensor_utility.hpp diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp new file mode 100644 index 000000000..1d170eeec --- /dev/null +++ b/examples/tensor/access_subtensor.cpp @@ -0,0 +1,121 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#include +#include + +#include + +//NOLINTNEXTLINE +int main() +{ + namespace ublas = boost::numeric::ublas; + + try { + using value = float; + using layout = ublas::layout::first_order; // storage format + using tensor = ublas::tensor_dynamic; + using span = ublas::span<>; +// constexpr auto ones = ublas::ones{}; + constexpr auto zeros = ublas::zeros{}; + + + // creates a three-dimensional tensor with extents 3,4 and 2 + // tensor A stores single-precision floating-point number according + // to the first-order storage format + + tensor t1 = zeros(3,4,2); + auto A = t1(span(), span(), span()); + + // initializes the tensor with increasing values along the first-index + // using a single index. 
+ auto vf = 1.0f; + for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) + A[i] = vf; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "A=" << A << ";" << std::endl << std::endl; + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." << std::endl; + } + + + try { + using value = std::complex; + using layout = ublas::layout::last_order; // storage format + using tensor = ublas::tensor_dynamic; + using shape = typename tensor::extents_type; + using span = ublas::span<>; + constexpr auto zeros = ublas::zeros{}; + + + // creates a four-dimensional tensor with extents 5,4,3 and 2 + // tensor A stores complex floating-point extended double precision numbers + // according to the last-order storage format + // and initializes it with the default value. + + //NOLINTNEXTLINE + tensor t1 = zeros(5,4,3,2); + auto B = t1(span(), span(), span(), span(), span()); + + // initializes the tensor with increasing values along the last-index + // using a single-index + auto vc = value(0,0); + for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) + B[i] = vc; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "B=" << B << ";" << std::endl << std::endl; + + + auto C = tensor(B.extents()); + // computes the complex conjugate of elements of B + // using multi-index notation. 
+ for(auto i = 0u; i < B.size(0); ++i) + for(auto j = 0u; j < B.size(1); ++j) + for(auto k = 0u; k < B.size(2); ++k) + for(auto l = 0u; l < B.size(3); ++l) + C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); + + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "C=" << C << ";" << std::endl << std::endl; + + + + // computes the complex conjugate of elements of B + // using iterators. + auto D = tensor(B.extents()); + std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "D=" << D << ";" << std::endl << std::endl; + + // reshaping tensors. + auto new_extents = B.extents().base(); + std::next_permutation( new_extents.begin(), new_extents.end() ); + auto E = reshape( D, shape(new_extents) ); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "E=" << E << ";" << std::endl << std::endl; + + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." << std::endl; + } +} diff --git a/examples/tensor/expressions_subtensor.cpp b/examples/tensor/expressions_subtensor.cpp new file mode 100644 index 000000000..e5cda496b --- /dev/null +++ b/examples/tensor/expressions_subtensor.cpp @@ -0,0 +1,77 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#include +#include +#include +#include + +int main() +{ + namespace ublas = boost::numeric::ublas; + using value = float; + using tensor = ublas::tensor_dynamic; + using matrix = ublas::matrix; + using vector = ublas::vector; + using shape = tensor::extents_type; + using span = ublas::span<>; + + try { + + + auto t1 = tensor{3,4,2}; + auto A = t1(span(1,ublas::max), span(), span(0,1)); + + tensor B = A = 2; + + // Calling overloaded operators + // and using simple tensor expression templates. + if( A != (B+1) ){ + A += 2*B - 1; + } + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "A=" << A << ";" << std::endl << std::endl; + + auto n = shape{3,4}; + auto D = matrix(n[0],n[1],1); + auto e = vector(n[1],1); + auto f = vector(n[0],2); + + auto C = t1(span(0,0)); + // Calling constructor with + // vector expression templates + tensor C = 2*f; + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "C=" << C << ";" << std::endl << std::endl; + + + // Calling overloaded operators + // and mixing simple tensor and matrix expression templates + tensor F = 3*C + 4*prod(2*D,e); + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "F=" << F << ";" << std::endl << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of simple expression." 
<< std::endl; + } + +} diff --git a/examples/tensor/subtensor.cpp b/examples/tensor/instantiate_subtensor.cpp similarity index 73% rename from examples/tensor/subtensor.cpp rename to examples/tensor/instantiate_subtensor.cpp index 3541d2674..790b5a808 100644 --- a/examples/tensor/subtensor.cpp +++ b/examples/tensor/instantiate_subtensor.cpp @@ -14,7 +14,7 @@ void instantiate_subtensor_dynamic() tensor t1 = ones(3, 4, 2); std::cout << "t1 = " << t1 << std::endl; - auto st1 = t1(span(1,ublas::max), span(), span(0,1)); + auto st1 = t1(span(0,ublas::max), span(), span(0,1)); std::cout << "st1 = " << st1 << std::endl; @@ -25,9 +25,9 @@ void instantiate_subtensor_dynamic() } } -void instantiate_tensor_dynamics_with_static_order() +void instantiate_subtensor_dynamic_with_static_order() { - namespace ublas = boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; using value = float; using layout = ublas::layout::first_order; // storage format using tensor = boost::numeric::ublas::tensor_dynamic; @@ -49,12 +49,19 @@ void instantiate_tensor_dynamics_with_static_order() } -void instantiate_tensor_static() +void instantiate_subtensor_static() { } int main() { - instantiate_subtensor_dynamic(); + try{ + instantiate_subtensor_dynamic(); + instantiate_subtensor_dynamic_with_static_order(); + instantiate_subtensor_static(); + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of instantiate-tensor." 
<< std::endl; + } } diff --git a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp index 45001f179..f4d88c2dd 100644 --- a/include/boost/numeric/ublas/tensor/dynamic_strides.hpp +++ b/include/boost/numeric/ublas/tensor/dynamic_strides.hpp @@ -16,11 +16,11 @@ #define _BOOST_UBLAS_TENSOR_DYNAMIC_STRIDES_HPP_ #include -#include +#include #include -namespace boost { -namespace numeric { +namespace boost { +namespace numeric { namespace ublas { using first_order = column_major; @@ -191,7 +191,7 @@ class basic_strides constexpr base_type const& base() const{ return this->_base; } - + protected: base_type _base; }; diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index c61f0c1ef..a12e1c85b 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -10,8 +10,8 @@ // -#ifndef BOOST_UBLAS_TENSOR_SPAN -#define BOOST_UBLAS_TENSOR_SPAN +#ifndef BOOST_UBLAS_TENSOR_SPAN_HPP +#define BOOST_UBLAS_TENSOR_SPAN_HPP #include #include @@ -56,7 +56,7 @@ class span // e.g. 
a(1) or a(0) // TODO: case where stop < 0 then stop += length explicit span(value_type l) - : span(0,1,l) + : span(l,1,l) { } @@ -166,4 +166,4 @@ inline bool operator!=( return !(lhs == rhs); } -#endif // BOOST_UBLAS_TENSOR SPAN +#endif // BOOST_UBLAS_TENSOR_SPAN_HPP diff --git a/include/boost/numeric/ublas/tensor/strides.hpp b/include/boost/numeric/ublas/tensor/strides.hpp index 0dac93bb7..409bae4b0 100644 --- a/include/boost/numeric/ublas/tensor/strides.hpp +++ b/include/boost/numeric/ublas/tensor/strides.hpp @@ -16,35 +16,35 @@ #include #include -#include +// #include namespace boost::numeric::ublas{ - template && is_strides_v - , int> = 0 + , int> = 0 > [[nodiscard]] inline constexpr bool operator==(LStrides const& lhs, RStrides const& rhs) noexcept{ - static_assert( std::is_same_v, + static_assert( std::is_same_v, "boost::numeric::ublas::operator==(LStrides,RStrides) : LHS value type should be same as RHS value type"); return lhs.size() == rhs.size() && std::equal(lhs.begin(), lhs.end(), rhs.begin()); } - template && is_strides_v - , int> = 0 + , int> = 0 > [[nodiscard]] inline constexpr bool operator!=(LStrides const& lhs, RStrides const& rhs) noexcept{ - static_assert( std::is_same_v, + static_assert( std::is_same_v, "boost::numeric::ublas::operator!=(LStrides,RStrides) : LHS value type should be same as RHS value type"); return !( lhs == rhs ); } - + } // namespace boost::numeric::ublas @@ -62,7 +62,7 @@ namespace boost::numeric::ublas::detail { [[nodiscard]] inline constexpr auto access(std::vector const& i, Stride const& w) { - static_assert( is_strides_v, + static_assert( is_strides_v, "boost::numeric::ublas::detail::access() : invalid type, type should be a strides"); const auto p = i.size(); @@ -84,8 +84,8 @@ namespace boost::numeric::ublas::detail { template [[nodiscard]] constexpr auto access(std::size_t sum, Stride const& w, std::size_t i, size_types ... 
is) - { - static_assert( is_strides_v, + { + static_assert( is_strides_v, "boost::numeric::ublas::detail::access() : invalid type, type should be a strides"); sum += i*w[r]; if constexpr (sizeof...(is) == 0) diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp new file mode 100644 index 000000000..bc379f596 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -0,0 +1,196 @@ +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Fraunhofer and Google in producing this work +// which firsted as a Google Summer of Code project. +// + + +/// \file subtensor_utility.hpp Definition for the tensor template class + +#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ +#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ + +#include "extents.hpp" +#include "span.hpp" +#include "tags.hpp" + +#include +#include +#include + + +namespace boost::numeric::ublas::detail { + + +/*! 
@brief Computes span strides for a subtensor + * + * span stride v is computed according to: v[i] = w[i]*s[i], where + * w[i] is the i-th stride of the tensor + * s[i] is the step size of the i-th span + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto to_span_strides(std::vector const& strides, Spans const& spans) +{ + if(strides.size() != spans.size()) + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::to_span_strides(): tensor strides.size() != spans.size()"); + + auto span_strides = std::vector(spans.size()); + + std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), + [](auto w, auto const& s) { return w * s.step(); } ); + + return std::vector( span_strides ); +} + +/*! @brief Computes the data pointer offset for a subtensor + * + * offset is computed according to: sum ( f[i]*w[i] ), where + * f[i] is the first element of the i-th span + * w[i] is the i-th stride of the tensor + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto to_offset(std::vector const& strides, Spans const& spans) +{ + if(strides.size() != spans.size()) + throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); + + return std::inner_product(spans.begin(), spans.end(), strides.begin(), Size(0), + std::plus(), [](auto const& s, Size w) {return s.first() * w; } ); +} + + +/*! @brief Computes the extents of the subtensor. 
+ * + * i-th extent is given by span[i].size() + * + * @param[in] spans vector of spans of the subtensor + */ +template +auto to_extents(spans_type const& spans) +{ + using extents_t = extents<>; + using base_type = typename extents_t::base_type; + if(spans.empty()) + return extents_t{}; + auto extents = base_type(spans.size()); + std::transform(spans.begin(), spans.end(), extents.begin(), [](auto const& s) { return s.size(); } ); + return extents_t( extents ); +} + + +/*! @brief Auxiliary function for subtensor which possibly transforms a span instance + * + * transform_span(span() ,4) -> span(0,3) + * transform_span(span(1,1) ,4) -> span(1,1) + * transform_span(span(1,3) ,4) -> span(1,3) + * transform_span(span(2,end),4) -> span(2,3) + * transform_span(span(end) ,4) -> span(3,3) + * + * @note span is zero-based indexed. + * + * @param[in] s span that is going to be transformed + * @param[in] extent extent that is maybe used for the tranformation + */ +template +auto transform_span(span const& s, std::size_t const extent) +{ + using span_type = span; + + std::size_t first = s.first(); + std::size_t last = s.last (); + + auto const extent0 = extent-1; + + size_type step = s.step (); + if(first >= extent) { + return (last >= extent ? span_type(extent , step, extent ) : + span_type(extent0, step, extent0)); + } + else if(last >= extent) return span_type(first , step, extent0); + else return span_type(first , step, last ); + return span_type{}; +} + + +template +void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); + +template +void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... 
spans) +{ + std::get(span_array) = transform_span(s, extents[r]); + static constexpr auto nspans = sizeof...(spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans)...); +} + +template +void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) +{ + static constexpr auto nspans = sizeof...(Spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + std::get(span_array) = transform_span(Span(arg), extents[r]); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans) ... ); + +} + + +/*! @brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. + * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_array(extents<> const& extents, Spans&& ... spans) +{ + constexpr static auto n = sizeof...(Spans); + if(extents.size() != n) + throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); + std::array span_array; + if constexpr (n>0) + transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + return span_array; +} + +/*! @brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_vector(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. 
+ * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_vector(extents<> const& extents, Spans&& ... spans) +{ + auto span_array = generate_span_array(extents,std::forward(spans)...); + return std::vector(span_array.begin(), span_array.end()); +} + +} // namespace boost::numeric::ublas::detail + + + + + +#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index 04a1b3662..07b2dcb04 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -26,7 +26,7 @@ #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "subtensor_engine.hpp" -#include "subtensor_utility.hpp" +#include "../subtensor_utility.hpp" #include "tensor_engine.hpp" #include @@ -42,14 +42,12 @@ class tensor_core> using self_type = tensor_core; template - using tensor_expression_type = - detail::tensor_expression; + using tensor_expression_type = detail::tensor_expression; - template struct subtensor_iterator { - }; + // template struct subtensor_iterator { + // }; - static constexpr bool is_const = - std::is_const>::value; + static constexpr bool is_const = std::is_const>::value; using container_type = typename engine_type::container_type; using layout_type = typename engine_type::layout_type; @@ -72,13 +70,13 @@ class tensor_core> typename container_traits_type::pointer>; using const_pointer = typename container_traits_type::const_pointer; - using iterator = typename self_type::subtensor_iterator; - using const_iterator = - typename self_type::subtensor_iterator const; + // using iterator = typename self_type::subtensor_iterator; + // using const_iterator = + // typename self_type::subtensor_iterator const; - using reverse_iterator = typename container_traits_type::reverse_iterator; - using const_reverse_iterator = - typename 
container_traits_type::const_reverse_iterator; + // using reverse_iterator = typename container_traits_type::reverse_iterator; + // using const_reverse_iterator = + // typename container_traits_type::const_reverse_iterator; using container_tag = typename container_traits_type::container_tag; using resizable_tag = typename container_traits_type::resizable_tag; @@ -95,7 +93,7 @@ class tensor_core> tensor_core(U&& t, FS&& first, SL&&... spans) : _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} - , _strides(t.strides()) + , _strides(detail::to_span_strides(t.strides(), _spans)) , _tensor(t) { _extents = detail::to_extents(_spans); diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp index 0a0994ce8..3cc256032 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp @@ -16,10 +16,10 @@ namespace boost::numeric::ublas template struct subtensor_engine { - using tensor_type = std::decay_t; - using engine_type = typename tensor_type::engine_type; // reference to the parent engine - using extents_type = typename tensor_type::extents_type; // reference to the parent extents - using layout_type = typename tensor_type::layout_type; // reference to the parent layout + using tensor_type = std::decay_t; + using engine_type = typename tensor_type::engine_type; // reference to the parent engine + using extents_type = typename tensor_type::extents_type; // reference to the parent extents + using layout_type = typename tensor_type::layout_type; // reference to the parent layout using container_type = typename tensor_type::container_type; // reference to the parent container }; diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp index 280e7b47f..d8e0ebfe4 100644 --- 
a/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_engine.hpp @@ -18,8 +18,8 @@ namespace boost::numeric::ublas{ template struct tensor_engine { - using extents_type = E; - using layout_type = L; + using extents_type = E; + using layout_type = L; using container_type = C; }; diff --git a/test/tensor/test_span.cpp b/test/tensor/test_span.cpp index 1b1da2a63..9df47d378 100644 --- a/test/tensor/test_span.cpp +++ b/test/tensor/test_span.cpp @@ -17,7 +17,7 @@ BOOST_AUTO_TEST_SUITE( span_testsuite ); struct fixture { - using span_type = boost::numeric::ublas::strided_span; + using span_type = boost::numeric::ublas::pan; fixture() : spans { @@ -39,7 +39,7 @@ struct fixture { BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) { - using span_type = boost::numeric::ublas::strided_span; + using span_type = boost::numeric::ublas::span<>; BOOST_CHECK_EQUAL (spans[0].first(),0); BOOST_CHECK_EQUAL (spans[0].step (),0); @@ -96,7 +96,7 @@ BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) { - using span_type = boost::numeric::ublas::strided_span; + using span_type = boost::numeric::ublas::span<>; BOOST_CHECK_EQUAL (span_type(spans[0]).first(),0); diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index ce01fedbb..d2f920c04 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -55,7 +55,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixt using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; + using subtensor_type = ublas::tensor_core>; auto check = [](auto const& e) { diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index c4f7fb2d9..18aeeb043 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ 
b/test/tensor/test_subtensor_utility.cpp @@ -17,14 +17,15 @@ #include #include #include -#include + +#include BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) struct fixture_span { - using span_type = boost::numeric::ublas::span; + using span_type = boost::numeric::ublas::span<>; fixture_span() : spans{ @@ -41,7 +42,7 @@ struct fixture_span { }; -BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_span ) +BOOST_FIXTURE_TEST_CASE( transform_span_test, fixture_span ) { using namespace boost::numeric; @@ -76,7 +77,7 @@ BOOST_FIXTURE_TEST_CASE( transform_strided_span_test, fixture_span ) } struct fixture_shape { - using shape = boost::numeric::ublas::extents<>; + using shape = boost::numeric::ublas::extents<>; fixture_shape() : extents{ shape{}, // 0 @@ -97,7 +98,7 @@ struct fixture_shape { BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) { namespace ublas = boost::numeric::ublas; - using span = ublas::span; + using span = ublas::span<>; // shape{} { @@ -169,7 +170,7 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) } { - auto v = ublas::detail::generate_span_array(extents[5],1,span(),ublas::max); + auto v = ublas::detail::generate_span_array(extents[5],1,span(),ublas::max); auto r = std::vector{span(1,1),span(0,2),span(0,0)}; BOOST_CHECK ( std::equal( v.begin(), v.end(), r.begin(), [](span const& l, span const& r){ return l == r; } ) ); } @@ -179,7 +180,7 @@ BOOST_FIXTURE_TEST_CASE( generate_span_array_test, fixture_shape ) struct fixture_span_vector_shape { using shape = boost::numeric::ublas::extents<>; - using span = boost::numeric::ublas::span; + using span = boost::numeric::ublas::span<>; fixture_span_vector_shape() From 02d64ee92302beb691375f488e1cfc260fc0e206 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 18:12:34 +0530 Subject: [PATCH 17/40] Minor fixes --- examples/tensor/access_subtensor.cpp | 14 +- include/boost/numeric/ublas/tensor/span.hpp | 2 +- 
.../ublas/tensor/subtensor_utility.hpp | 5 +- .../numeric/ublas/tensor/tensor/subtensor.hpp | 2 +- .../ublas/tensor/tensor/subtensor_utility.hpp | 196 ------------------ .../ublas/tensor/tensor/tensor_dynamic.hpp | 13 ++ test/tensor/Jamfile | 9 +- test/tensor/test_subtensor.cpp | 8 +- test/tensor/test_subtensor_utility.cpp | 2 +- 9 files changed, 31 insertions(+), 220 deletions(-) delete mode 100644 include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index 1d170eeec..4a98d1f7d 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -81,7 +81,6 @@ int main() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "B=" << B << ";" << std::endl << std::endl; - auto C = tensor(B.extents()); // computes the complex conjugate of elements of B // using multi-index notation. @@ -96,14 +95,13 @@ int main() std::cout << "C=" << C << ";" << std::endl << std::endl; - - // computes the complex conjugate of elements of B - // using iterators. + // // computes the complex conjugate of elements of B + // // using iterators. auto D = tensor(B.extents()); - std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "D=" << D << ";" << std::endl << std::endl; + // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "D=" << D << ";" << std::endl << std::endl; // reshaping tensors. 
auto new_extents = B.extents().base(); diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index a12e1c85b..24d799280 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -83,8 +83,8 @@ class span span(span const& other) : first_(other.first_) - , last_ (other.last_ ) , step_ (other.step_ ) + , last_ (other.last_ ) { } diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index bc379f596..cfee95fd9 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -113,10 +113,7 @@ auto transform_span(span const& s, std::size_t const extent) auto const extent0 = extent-1; size_type step = s.step (); - if(first >= extent) { - return (last >= extent ? span_type(extent , step, extent ) : - span_type(extent0, step, extent0)); - } + if(first >= extent) return span_type(extent0, step, extent0); else if(last >= extent) return span_type(first , step, extent0); else return span_type(first , step, last ); return span_type{}; diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index 07b2dcb04..bf6363e21 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -25,8 +25,8 @@ #include "../tags.hpp" #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" -#include "subtensor_engine.hpp" #include "../subtensor_utility.hpp" +#include "subtensor_engine.hpp" #include "tensor_engine.hpp" #include diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp deleted file mode 100644 index 19af629ff..000000000 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_utility.hpp +++ /dev/null 
@@ -1,196 +0,0 @@ -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Fraunhofer and Google in producing this work -// which firsted as a Google Summer of Code project. -// - - -/// \file subtensor_utility.hpp Definition for the tensor template class - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ - -#include "../extents.hpp" -#include "../span.hpp" -#include "../tags.hpp" - -#include -#include -#include - - -namespace boost::numeric::ublas::detail { - - -/*! @brief Computes span strides for a subtensor - * - * span stride v is computed according to: v[i] = w[i]*s[i], where - * w[i] is the i-th stride of the tensor - * s[i] is the step size of the i-th span - * - * @param[in] strides strides of the tensor, the subtensor refers to - * @param[in] spans vector of spans of the subtensor -*/ -template -auto to_span_strides(std::vector const& strides, Spans const& spans) -{ - if(strides.size() != spans.size()) - throw std::runtime_error("Error in boost::numeric::ublas::subtensor::to_span_strides(): tensor strides.size() != spans.size()"); - - auto span_strides = std::vector(spans.size()); - - std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), - [](auto w, auto const& s) { return w * s.step(); } ); - - return std::vector( span_strides ); -} - -/*! 
@brief Computes the data pointer offset for a subtensor - * - * offset is computed according to: sum ( f[i]*w[i] ), where - * f[i] is the first element of the i-th span - * w[i] is the i-th stride of the tensor - * - * @param[in] strides strides of the tensor, the subtensor refers to - * @param[in] spans vector of spans of the subtensor -*/ -template -auto to_offset(std::vector const& strides, Spans const& spans) -{ - if(strides.size() != spans.size()) - throw std::runtime_error("Error in boost::numeric::ublas::subtensor::offset(): tensor strides.size() != spans.size()"); - - return std::inner_product(spans.begin(), spans.end(), strides.begin(), Size(0), - std::plus(), [](auto const& s, Size w) {return s.first() * w; } ); -} - - -/*! @brief Computes the extents of the subtensor. - * - * i-th extent is given by span[i].size() - * - * @param[in] spans vector of spans of the subtensor - */ -template -auto to_extents(spans_type const& spans) -{ - using extents_t = extents<>; - using base_type = typename extents_t::base_type; - if(spans.empty()) - return extents_t{}; - auto extents = base_type(spans.size()); - std::transform(spans.begin(), spans.end(), extents.begin(), [](auto const& s) { return s.size(); } ); - return extents_t( extents ); -} - - -/*! @brief Auxiliary function for subtensor which possibly transforms a span instance - * - * transform_span(span() ,4) -> span(0,3) - * transform_span(span(1,1) ,4) -> span(1,1) - * transform_span(span(1,3) ,4) -> span(1,3) - * transform_span(span(2,end),4) -> span(2,3) - * transform_span(span(end) ,4) -> span(3,3) - * - * @note span is zero-based indexed. 
- * - * @param[in] s span that is going to be transformed - * @param[in] extent extent that is maybe used for the tranformation - */ -template -auto transform_span(span const& s, std::size_t const extent) -{ - using span_type = span; - - std::size_t first = s.first(); - std::size_t last = s.last (); - - auto const extent0 = extent-1; - - size_type step = s.step (); - if(first >= extent) { - return (last >= extent ? span_type(extent , step, extent ) : - span_type(extent0, step, extent0)); - } - else if(last >= extent) return span_type(first , step, extent0); - else return span_type(first , step, last ); - return span_type{}; -} - - -template -void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); - -template -void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) -{ - std::get(span_array) = transform_span(s, extents[r]); - static constexpr auto nspans = sizeof...(spans); - static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans)...); -} - -template -void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) -{ - static constexpr auto nspans = sizeof...(Spans); - static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - std::get(span_array) = transform_span(Span(arg), extents[r]); - if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans) ... ); - -} - - -/*! @brief Auxiliary function for subtensor that generates array of spans - * - * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) - * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) - * - * @note span is zero-based indexed. 
- * - * @param[in] extents of the tensor - * @param[in] spans spans with which the subtensor is created - */ -template -auto generate_span_array(extents<> const& extents, Spans&& ... spans) -{ - constexpr static auto n = sizeof...(Spans); - if(extents.size() != n) - throw std::runtime_error("Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); - std::array span_array; - if constexpr (n>0) - transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); - return span_array; -} - -/*! @brief Auxiliary function for subtensor that generates array of spans - * - * generate_span_vector(shape(4,3,5,2), span(), 1, span(2,end), end ) - * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) - * - * @note span is zero-based indexed. - * - * @param[in] extents of the tensor - * @param[in] spans spans with which the subtensor is created - */ -template -auto generate_span_vector(extents<> const& extents, Spans&& ... spans) -{ - auto span_array = generate_span_array(extents,std::forward(spans)...); - return std::vector(span_array.begin(), span_array.end()); -} - -} // namespace boost::numeric::ublas::detail - - - - - -#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index bd1dab28e..a0d71b1c4 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -167,6 +167,19 @@ template { } + /** @brief Constructs a tensor_core with another tensor_core with a subtensor_engine + * + * @param other tensor_core with a subtensor_engine to be copied. 
+ */ + template + explicit inline tensor_core (const tensor_core> &other) + : tensor_expression_type{} + , _extents (ublas::begin(other.extents ()), ublas::end (other.extents ())) + , _strides (ublas::to_strides(_extents, layout_type{})) + , _container(ublas::product(_extents)) + { + detail::eval(*this, other); + } /** @brief Constructs a tensor_core with an tensor_core expression * diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index ffeb7173c..6b06bd131 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -47,7 +47,7 @@ test-suite boost-ublas-tensor-test test_fixed_rank_operators_comparison.cpp test_fixed_rank_strides.cpp test_fixed_rank_tensor.cpp - test_fixed_rank_tensor_matrix_vector.cpp + test_fixed_rank_tensor_matrix_vector.cpp test_functions.cpp test_multi_index.cpp test_multi_index_utility.cpp @@ -62,8 +62,8 @@ test-suite boost-ublas-tensor-test test_static_tensor.cpp test_static_tensor_matrix_vector.cpp test_strides.cpp - test_subtensor.cpp - test_subtensor_utility.cpp + # test_subtensor.cpp + test_subtensor_utility.cpp test_tensor.cpp test_tensor_matrix_vector.cpp unit_test_framework @@ -72,7 +72,6 @@ test-suite boost-ublas-tensor-test : : test_tensor : - # + # ] ; - diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index d2f920c04..be6012877 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -60,7 +60,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixt auto check = [](auto const& e) { auto t = tensor_type(e); - auto s = subtensor_type(t); + auto s = t() BOOST_CHECK_EQUAL ( s.size() , t.size() ); BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); if(ublas::empty(e)) { @@ -83,11 +83,11 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixt BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { - namespace ub = boost::numeric::ublas; + namespace ublas = boost::numeric::ublas; using value_type = typename 
value::first_type; using layout_type = typename value::second_type; - using tensor_type = ub::tensor_dynamic; - using subtensor_type = ub::subtensor; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = ublas::subtensor; using span = ub::sliced_span; diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index 18aeeb043..fad2d4870 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -26,7 +26,7 @@ BOOST_AUTO_TEST_SUITE ( subtensor_utility_testsuite ) struct fixture_span { using span_type = boost::numeric::ublas::span<>; - + fixture_span() : spans{ span_type(), // 0, a(:) From dd83aeaa71c70f6b1382d1ac040b4971e639da27 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 20:13:04 +0530 Subject: [PATCH 18/40] subtensor tensor expresssion --- examples/tensor/Jamfile | 4 +- examples/tensor/access_subtensor.cpp | 5 +- examples/tensor/expressions_subtensor.cpp | 16 +++-- examples/tensor/instantiate_subtensor.cpp | 31 +--------- .../ublas/tensor/operators_arithmetic.hpp | 14 +++-- .../numeric/ublas/tensor/tensor/subtensor.hpp | 60 ++++++++++++++++--- .../ublas/tensor/tensor/tensor_dynamic.hpp | 30 +++++++--- test/tensor/test_subtensor.cpp | 42 ++++++------- 8 files changed, 119 insertions(+), 83 deletions(-) diff --git a/examples/tensor/Jamfile b/examples/tensor/Jamfile index 511c677e4..d8dcfcfff 100644 --- a/examples/tensor/Jamfile +++ b/examples/tensor/Jamfile @@ -24,4 +24,6 @@ exe simple_expressions : simple_expressions.cpp ; exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; exe instantiate_tensor : instantiate_tensor.cpp ; -exe subtensor : subtensor.cpp ; +exe expressions_subtensor : expressions_subtensor.cpp ; +exe instantiate_subtensor : instantiate_subtensor.cpp ; +exe access_subtensor : access_subtensor.cpp ; diff --git 
a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index 4a98d1f7d..de316085b 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -42,10 +42,13 @@ int main() for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) A[i] = vf; + tensor t2 = A; + // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; + std::cout << "t1=" << t1 << ";" << std::endl << std::endl; } catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); std::cerr << "in the main function of access-tensor." << std::endl; @@ -68,7 +71,7 @@ int main() //NOLINTNEXTLINE tensor t1 = zeros(5,4,3,2); - auto B = t1(span(), span(), span(), span(), span()); + auto B = t1(span(), span(), span(), span()); // initializes the tensor with increasing values along the last-index // using a single-index diff --git a/examples/tensor/expressions_subtensor.cpp b/examples/tensor/expressions_subtensor.cpp index e5cda496b..6580831e3 100644 --- a/examples/tensor/expressions_subtensor.cpp +++ b/examples/tensor/expressions_subtensor.cpp @@ -30,14 +30,12 @@ int main() auto t1 = tensor{3,4,2}; - auto A = t1(span(1,ublas::max), span(), span(0,1)); + auto A = t1(span(), span(), span()); tensor B = A = 2; - // Calling overloaded operators - // and using simple tensor expression templates. 
- if( A != (B+1) ){ - A += 2*B - 1; + if (A == B) { + std::cout << "Equal" << std::endl; } // formatted output @@ -45,12 +43,18 @@ int main() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; + B = 1; + A = B + 1; + + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "A=" << A << ";" << std::endl << std::endl; + auto n = shape{3,4}; auto D = matrix(n[0],n[1],1); auto e = vector(n[1],1); auto f = vector(n[0],2); - auto C = t1(span(0,0)); // Calling constructor with // vector expression templates tensor C = 2*f; diff --git a/examples/tensor/instantiate_subtensor.cpp b/examples/tensor/instantiate_subtensor.cpp index 790b5a808..71574bc10 100644 --- a/examples/tensor/instantiate_subtensor.cpp +++ b/examples/tensor/instantiate_subtensor.cpp @@ -3,41 +3,19 @@ using namespace boost::numeric::ublas; void instantiate_subtensor_dynamic() -{ - namespace ublas = boost::numeric::ublas; - using value = float; - using layout = boost::numeric::ublas::layout::first_order; // storage format - using tensor = boost::numeric::ublas::tensor_static_rank; - constexpr auto ones = ublas::ones_static_rank{}; - - try { - tensor t1 = ones(3, 4, 2); - std::cout << "t1 = " << t1 << std::endl; - - auto st1 = t1(span(0,ublas::max), span(), span(0,1)); - - std::cout << "st1 = " << st1 << std::endl; - - } catch (const std::exception& e) { - std::cerr << "Cought exception " << e.what(); - std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." 
<< std::endl; - throw; - } -} - -void instantiate_subtensor_dynamic_with_static_order() { namespace ublas = boost::numeric::ublas; using value = float; using layout = ublas::layout::first_order; // storage format using tensor = boost::numeric::ublas::tensor_dynamic; constexpr auto ones = ublas::ones{}; + using span = ublas::span<>; try { tensor t1 = ones(3, 4, 2); std::cout << "t1 = " << t1 << std::endl; - auto st1 = t1(span(1,ublas::max), span(), span(0,1)); + auto st1 = t1(span(0,ublas::max), span(), span(0,1)); std::cout << "st1 = " << st1 << std::endl; @@ -48,18 +26,15 @@ void instantiate_subtensor_dynamic_with_static_order() } } - -void instantiate_subtensor_static() +void instantiate_subtensor_dynamic_with_static_order() { } - int main() { try{ instantiate_subtensor_dynamic(); instantiate_subtensor_dynamic_with_static_order(); - instantiate_subtensor_static(); } catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); std::cerr << "in the main function of instantiate-tensor." 
<< std::endl; diff --git a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp index fa89d431f..38b95488f 100644 --- a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp +++ b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp @@ -28,10 +28,12 @@ namespace boost::numeric::ublas template class tensor_core; +template +class subtensor_engine; + template class matrix_expression; - template class vector_expression; @@ -205,7 +207,7 @@ inline template inline constexpr auto operator+( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) + boost::numeric::ublas::detail::tensor_expression const& rhs) { static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, @@ -225,7 +227,7 @@ inline template inline constexpr auto operator-( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) + boost::numeric::ublas::detail::tensor_expression const& rhs) { static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, @@ -246,7 +248,7 @@ inline template inline constexpr auto operator*( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) + boost::numeric::ublas::detail::tensor_expression const& rhs) { static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, @@ -266,7 +268,7 @@ inline template inline constexpr auto operator/( boost::numeric::ublas::detail::tensor_expression const& lhs, - boost::numeric::ublas::detail::tensor_expression const& rhs) + boost::numeric::ublas::detail::tensor_expression const& rhs) { static_assert( std::is_same_v< typename T1::value_type, typename T2::value_type>, @@ -422,7 +424,7 @@ inline } template -constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, +constexpr auto& operator /= 
(boost::numeric::ublas::tensor_core& lhs, typename boost::numeric::ublas::tensor_core::const_reference r) { boost::numeric::ublas::detail::eval(lhs, [r](auto& l) { l/=r; } ); diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index bf6363e21..279622c99 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -35,8 +35,9 @@ namespace boost::numeric::ublas { template class tensor_core> - : public detail::tensor_expression>, - tensor_core>> { + : public detail::tensor_expression< + tensor_core>, + tensor_core>> { public: using engine_type = subtensor_engine; using self_type = tensor_core; @@ -44,6 +45,9 @@ class tensor_core> template using tensor_expression_type = detail::tensor_expression; + template + using parent_tensor_expression_type = detail::tensor_expression; + // template struct subtensor_iterator { // }; @@ -89,12 +93,32 @@ class tensor_core> tensor_core(const tensor_core&) = default; + tensor_core(T& t) + : tensor_expression_type{} + , _spans() + , _extents(t.extents()) + , _strides(t.strides()) + , _tensor(t) + { + } + template tensor_core(U&& t, FS&& first, SL&&... 
spans) - : _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) + : tensor_expression_type{} + , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} , _strides(detail::to_span_strides(t.strides(), _spans)) , _tensor(t) + { + _extents = detail::to_extents(_spans); + } + + tensor_core(tensor_core&& v) + : tensor_expression_type{} + , _spans (std::move(v._spans)) + , _extents(std::move(v._extents)) + , _strides(std::move(v._strides)) + , _tensor (v._tensor) { _extents = detail::to_extents(_spans); for (int i = 0; i < (int) _extents.size(); i++) { @@ -122,18 +146,36 @@ class tensor_core> return *this; } + /** @brief Evaluates the tensor_expression and assigns the results to the + * tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must + * conform with this tensor_core. + * + * @param expr expression that is evaluated. + */ + template + tensor_core& operator=(const parent_tensor_expression_type& expr) + { + detail::eval(*this, expr); + return *this; + } + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { - swap(*this, other); + swap (*this, other); return *this; } - // tensor_core& operator=(const_reference v) - // { - // std::fill_n(_container.begin(), _container.size(), v); - // return *this; - // } + tensor_core& operator=(const_reference v) + { + for(auto i = 0u; i < this->size(); ++i) + this->at(i) = v; + return *this; + } /** @brief Element access using a multi-index with bound checking which can * throw an exception. 
diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index a0d71b1c4..3981f9c44 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -89,6 +89,10 @@ template using subtensor_type = tensor_core>; + template + using subtensor_expression_type = detail::tensor_expression; + + explicit tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape @@ -178,6 +182,7 @@ template , _strides (ublas::to_strides(_extents, layout_type{})) , _container(ublas::product(_extents)) { + std::cout << "called" << std::endl; detail::eval(*this, other); } @@ -295,10 +300,25 @@ template return *this; } + /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. + * + * @param expr expression that is evaluated. 
+ */ + template + tensor_core &operator = (const subtensor_expression_type &expr) + { + detail::eval(*this, expr); + return *this; + } + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { - swap (*this, other); + detail::eval(*this, other); return *this; } @@ -450,14 +470,6 @@ template return subtensor_type(*this, std::forward(s), std::forward(spans)...); } - friend void swap(tensor_core& lhs, tensor_core& rhs) - { - std::swap(lhs._extents , rhs._extents); - std::swap(lhs._strides , rhs._strides); - std::swap(lhs._container , rhs._container); - } - - [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index be6012877..f0d051dfa 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -60,7 +60,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixt auto check = [](auto const& e) { auto t = tensor_type(e); - auto s = t() + auto s = subtensor_type(t); BOOST_CHECK_EQUAL ( s.size() , t.size() ); BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); if(ublas::empty(e)) { @@ -87,17 +87,16 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; - using span = ub::sliced_span; + using subtensor_type = ublas::tensor_core>; + using span = ublas::span; { auto A = tensor_type{}; auto Asub = subtensor_type( A ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( 
Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); } @@ -107,9 +106,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) auto A = tensor_type{1,1}; auto Asub = subtensor_type( A, 0, 0 ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); } @@ -118,9 +116,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) auto A = tensor_type{1,2}; auto Asub = subtensor_type( A, 0, span{} ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); - BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); } { @@ -133,8 +130,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); BOOST_CHECK_EQUAL( Asub.data() , A.data()+ Asub.spans().at(0).first()*A.strides().at(0) + @@ -145,13 +142,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { auto A = tensor_type{2,3}; auto Asub = subtensor_type( A, 0, 1 ); - auto B = tensor_type(Asub.getExtents()()); + auto B = tensor_type(Asub.extents()); BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) 
, 1 ); BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); @@ -164,13 +161,13 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) { auto A = tensor_type{4,3}; auto Asub = subtensor_type( A, span(1,2), span(1,ub::max) ); - auto B = tensor_type(Asub.getExtents()()); + auto B = tensor_type(Asub.extents()); BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 2 ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); @@ -184,15 +181,15 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) auto A = tensor_type{4,3,5}; auto Asub = subtensor_type( A, span(1,2), span(1,ub::max), span(2,4) ); - auto B = tensor_type(Asub.getExtents()()); + auto B = tensor_type(Asub.extents()); BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(0) , 2 ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.getExtents()().at(2) , 3 ); + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(2) , 3 ); BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); @@ -231,9 +228,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f auto Bsub = subtensor_type( A ); - BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == 
A.strides() ); - BOOST_CHECK( Asub.getExtents()() == A.extents() ); + BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); BOOST_CHECK( Bsub.span_strides() == A.strides() ); From 4fafcc820da8e777add60d08f8ac1fde1440555f Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 20:31:21 +0530 Subject: [PATCH 19/40] Minor Fixes --- include/boost/numeric/ublas/tensor/operators_arithmetic.hpp | 2 +- include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp index 38b95488f..2628ef84f 100644 --- a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp +++ b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp @@ -29,7 +29,7 @@ template class tensor_core; template -class subtensor_engine; +struct subtensor_engine; template class matrix_expression; diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 3981f9c44..ca9651ed4 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -182,7 +182,6 @@ template , _strides (ublas::to_strides(_extents, layout_type{})) , _container(ublas::product(_extents)) { - std::cout << "called" << std::endl; detail::eval(*this, other); } From 027c3fffc2d9069d18c23c7cd3b4b3ccb9b0d58c Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 20:46:52 +0530 Subject: [PATCH 20/40] Minor Fixes --- .../boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index ca9651ed4..e25427cbe 100644 --- 
a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -317,7 +317,7 @@ template // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { - detail::eval(*this, other); + swap (*this, other); return *this; } @@ -452,6 +452,12 @@ template return std::make_pair( std::cref(*this), std::make_tuple( p, std::forward(ps)... ) ); } + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents); + std::swap(lhs._strides , rhs._strides); + std::swap(lhs._container , rhs._container); + } /** * @brief Generates a subtensor from a tensor From 12d996e132f73261969db974b7a91164cde1e1b9 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 21:58:04 +0530 Subject: [PATCH 21/40] Add += operator --- examples/tensor/expressions_subtensor.cpp | 4 +- .../ublas/tensor/expression_evaluation.hpp | 35 +++++++++- .../ublas/tensor/operators_arithmetic.hpp | 70 +++++++++++++++++++ include/boost/numeric/ublas/tensor/span.hpp | 26 ------- .../numeric/ublas/tensor/tensor/subtensor.hpp | 3 - 5 files changed, 104 insertions(+), 34 deletions(-) diff --git a/examples/tensor/expressions_subtensor.cpp b/examples/tensor/expressions_subtensor.cpp index 6580831e3..99096a095 100644 --- a/examples/tensor/expressions_subtensor.cpp +++ b/examples/tensor/expressions_subtensor.cpp @@ -43,8 +43,8 @@ int main() std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; - B = 1; - A = B + 1; + B += 1; + A = B; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; diff --git a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp index d29b6eabe..1e3b18569 100644 --- 
a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp +++ b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp @@ -289,13 +289,13 @@ inline void eval(tensor_type& lhs, tensor_expression, "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " - "tensor_type and tensor_expresssion should have same value type" + "tensor_type and tensor_expression should have same value type" ); if ( !detail::all_extents_equal(expr, lhs.extents() ) ){ throw std::runtime_error("Error in boost::numeric::ublas::tensor_core: expression contains tensors with different shapes."); - } - + } + #pragma omp parallel for for(auto i = 0u; i < lhs.size(); ++i) lhs(i) = expr()(i); @@ -322,6 +322,35 @@ inline void eval(tensor_type& lhs, tensor_expression } +/** @brief Evaluates expression for a tensor_core + * + * Assigns the results of the expression to the tensor_core. + * + * \note Checks if shape of the tensor_core matches those of all tensors within the expression. +*/ +template +inline void eval(tensor_type& lhs, tensor_expression const& expr, unary_fn const fn) +{ + +// static_assert(is_valid_tensor_v && is_valid_tensor_v, +// "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " +// "tensor_type and tensor_expresssion should be a valid tensor type" +// ); + + static_assert(std::is_same_v, + "boost::numeric::ublas::detail::eval(tensor_type&, tensor_expression const&) : " + "tensor_type and tensor_expression should have same value type" + ); + + if ( !detail::all_extents_equal(expr, lhs.extents() ) ){ + throw std::runtime_error("Error in boost::numeric::ublas::tensor_core: expression contains tensors with different shapes."); + } + + #pragma omp parallel for + for(auto i = 0u; i < lhs.size(); ++i) + fn(lhs(i), expr()(i)); +} + /** @brief Evaluates expression for a tensor_core * diff --git a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp index 
2628ef84f..fc4492a0b 100644 --- a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp +++ b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp @@ -393,7 +393,77 @@ inline return lhs; } +template +inline + constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, + const boost::numeric::ublas::detail::tensor_expression>,D> &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); + return lhs; +} + +template +inline + constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, + const boost::numeric::ublas::detail::tensor_expression>,D> &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); + return lhs; +} + +template +inline + constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, + const boost::numeric::ublas::detail::tensor_expression>,D> &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); + return lhs; +} + +template +inline + constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, + const boost::numeric::ublas::detail::tensor_expression>,D> &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); + return lhs; +} +template +inline + constexpr auto& operator += (boost::numeric::ublas::tensor_core>& lhs, + const boost::numeric::ublas::detail::tensor_expression &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); + return lhs; +} + +template +inline + constexpr auto& operator -= (boost::numeric::ublas::tensor_core>& lhs, + const boost::numeric::ublas::detail::tensor_expression &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); + return lhs; +} + +template +inline + constexpr auto& operator *= (boost::numeric::ublas::tensor_core>& lhs, + const boost::numeric::ublas::detail::tensor_expression &expr) +{ + 
boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); + return lhs; +} + +template +inline + constexpr auto& operator /= (boost::numeric::ublas::tensor_core>& lhs, + const boost::numeric::ublas::detail::tensor_expression &expr) +{ + boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); + return lhs; +} template diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 24d799280..8bc36d0f7 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -102,20 +102,6 @@ class span ~span() = default; - inline value_type operator[] (std::size_t idx) const - { - return first_ + idx * step_; - } - - inline span operator()(const span &rhs) const - { - auto const& lhs = *this; - return span( - rhs.first_*lhs.step_ + lhs.first_, - lhs.step_ *rhs.step_, - rhs.last_ *lhs.step_ + lhs.first_ ); - } - [[ nodiscard ]] inline value_type size() const { if (first_ == last_) { return value_type(1); @@ -128,18 +114,6 @@ class span value_type first_, step_, last_ ; }; -template -inline auto ran(unsigned_type_lhs f, unsigned_type_rhs l) -{ - return span(f,l); -} - -template -inline auto ran(unsigned_type_left f, unsigned_type_middle s, unsigned_type_right l) -{ - return span(f,s,l); -} - } // namespace diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp index 279622c99..0def6c407 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp @@ -121,9 +121,6 @@ class tensor_core> , _tensor (v._tensor) { _extents = detail::to_extents(_spans); - for (int i = 0; i < (int) _extents.size(); i++) { - std::cout << _extents[i] << std::endl; - } } /// @brief Default destructor From e03c2a9578a633ebfcb73043bb67716b1ae2db93 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 
23:18:32 +0530 Subject: [PATCH 22/40] WIP: static rank subtensor --- examples/tensor/expressions_subtensor.cpp | 2 +- include/boost/numeric/ublas/tensor/tensor.hpp | 2 +- .../{subtensor.hpp => subtensor_dynamic.hpp} | 35 +- .../ublas/tensor/tensor/subtensor_engine.hpp | 4 + .../tensor/tensor/subtensor_static_rank.hpp | 376 ++++++++++++++++++ .../ublas/tensor/tensor/tensor_dynamic.hpp | 2 +- .../tensor/tensor/tensor_static_rank.hpp | 4 +- .../ublas/tensor/traits/slice_traits.hpp | 54 --- test/tensor/Jamfile | 64 +-- test/tensor/test_subtensor.cpp | 49 +-- 10 files changed, 448 insertions(+), 144 deletions(-) rename include/boost/numeric/ublas/tensor/tensor/{subtensor.hpp => subtensor_dynamic.hpp} (93%) create mode 100644 include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp delete mode 100644 include/boost/numeric/ublas/tensor/traits/slice_traits.hpp diff --git a/examples/tensor/expressions_subtensor.cpp b/examples/tensor/expressions_subtensor.cpp index 99096a095..64b0382d9 100644 --- a/examples/tensor/expressions_subtensor.cpp +++ b/examples/tensor/expressions_subtensor.cpp @@ -44,7 +44,7 @@ int main() std::cout << "A=" << A << ";" << std::endl << std::endl; B += 1; - A = B; + A += B; std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; diff --git a/include/boost/numeric/ublas/tensor/tensor.hpp b/include/boost/numeric/ublas/tensor/tensor.hpp index c56e5e981..a64307450 100644 --- a/include/boost/numeric/ublas/tensor/tensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor.hpp @@ -18,6 +18,6 @@ #include "tensor/tensor_engine.hpp" #include "tensor/tensor_static_rank.hpp" #include "tensor/tensor_static.hpp" -#include "tensor/subtensor.hpp" +#include "tensor/subtensor_dynamic.hpp" #endif // BOOST_UBLAS_TENSOR_TENSOR_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp similarity index 93% 
rename from include/boost/numeric/ublas/tensor/tensor/subtensor.hpp rename to include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index 0def6c407..d7e549a4b 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -9,8 +9,8 @@ /// \file subtensor.hpp Definition for the subtensor template class -#ifndef BOOST_UBLAS_SUBTENSOR_HPP -#define BOOST_UBLAS_SUBTENSOR_HPP +#ifndef BOOST_UBLAS_SUBTENSOR_DYNAMIC_HPP +#define BOOST_UBLAS_SUBTENSOR_DYNAMIC_HPP #include "../access.hpp" #include "../algorithms.hpp" @@ -26,6 +26,7 @@ #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "../subtensor_utility.hpp" +#include "../tensor.hpp" #include "subtensor_engine.hpp" #include "tensor_engine.hpp" @@ -33,25 +34,29 @@ namespace boost::numeric::ublas { -template -class tensor_core> +template +class tensor_core>> : public detail::tensor_expression< - tensor_core>, - tensor_core>> { + tensor_core>>, + tensor_core>>> { public: - using engine_type = subtensor_engine; + using tensor_type = tensor_dynamic; + using engine_type = subtensor_engine; using self_type = tensor_core; template using tensor_expression_type = detail::tensor_expression; - + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; template - using parent_tensor_expression_type = detail::tensor_expression; + using parent_tensor_expression_type = detail::tensor_expression; // template struct subtensor_iterator { // }; - static constexpr bool is_const = std::is_const>::value; + static constexpr bool is_const = std::is_const>::value; using container_type = typename engine_type::container_type; using layout_type = typename engine_type::layout_type; @@ -93,7 +98,7 @@ class tensor_core> tensor_core(const tensor_core&) = default; - tensor_core(T& t) + tensor_core(tensor_type& t) : tensor_expression_type{} , _spans() , 
_extents(t.extents()) @@ -365,8 +370,8 @@ class tensor_core> [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data();} - [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data();} + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), spans_);} + [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), spans_);} [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: @@ -376,9 +381,11 @@ class tensor_core> std::vector _spans; extents_type _extents; strides_type _strides; - T& _tensor; + tensor_type& _tensor; }; +template +using subtensor = tensor_core>; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp index 3cc256032..33320060c 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp @@ -23,6 +23,10 @@ struct subtensor_engine using container_type = typename tensor_type::container_type; // reference to the parent container }; + +template +using subtensor = tensor_core>; + } // namespace boost::numeric::ublas #endif diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp new file mode 100644 index 000000000..1dfbe0b32 --- /dev/null +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -0,0 +1,376 @@ +// +// Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +//: + + +/// \file subtensor.hpp Definition for the subtensor template class + +#ifndef BOOST_UBLAS_SUBTENSOR_STATIC_RANK_HPP +#define BOOST_UBLAS_SUBTENSOR_STATIC_RANK_HPP + +#include "../access.hpp" +#include "../algorithms.hpp" +#include "../concepts.hpp" +#include "../expression.hpp" +#include "../expression_evaluation.hpp" +#include "../extents.hpp" +#include "../index.hpp" +#include "../index_functions.hpp" +#include "../layout.hpp" +#include "../span.hpp" +#include "../tags.hpp" +#include "../traits/read_write_traits.hpp" +#include "../type_traits.hpp" +#include "../subtensor_utility.hpp" +#include "subtensor_engine.hpp" +#include "tensor_engine.hpp" + +#include + +namespace boost::numeric::ublas { + +template +class tensor_core>> + : public detail::tensor_expression< + tensor_core>>, + tensor_core>>> { +public: + using tensor_type = tensor_static_rank; + using engine_type = subtensor_engine; + using self_type = tensor_core; + + template + using tensor_expression_type = detail::tensor_expression; + template + using matrix_expression_type = matrix_expression; + template + using vector_expression_type = vector_expression; + template + using parent_tensor_expression_type = detail::tensor_expression; + + // template struct subtensor_iterator { + // }; + + static constexpr bool is_const = std::is_const>::value; + + using container_type = typename engine_type::container_type; + using layout_type = typename engine_type::layout_type; + using extents_type = typename engine_type::extents_type; + using strides_type = typename extents_type::base_type; + + using container_traits_type = container_traits; + + using size_type = typename container_traits_type::size_type; + using difference_type = typename container_traits_type::difference_type; + using value_type = typename container_traits_type::value_type; + + using reference = std::conditional_t; + using const_reference = 
typename container_traits_type::const_reference; + + using pointer = std::conditional_t; + using const_pointer = typename container_traits_type::const_pointer; + + // using iterator = typename self_type::subtensor_iterator; + // using const_iterator = + // typename self_type::subtensor_iterator const; + + // using reverse_iterator = typename container_traits_type::reverse_iterator; + // using const_reverse_iterator = + // typename container_traits_type::const_reverse_iterator; + + using matrix_type = matrix >; + using vector_type = vector >; + + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; + + using span_type = span; + + using subtensor_type = self_type; + + explicit tensor_core() = delete; + + tensor_core(const tensor_core&) = default; + + tensor_core(tensor_type& t) + : tensor_expression_type{} + , _spans() + , _extents(t.extents()) + , _strides(t.strides()) + , _tensor(t) + { + } + + template + tensor_core(U&& t, FS&& first, SL&&... spans) + : tensor_expression_type{} + , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) + , _extents{} + , _strides(detail::to_span_strides(t.strides(), _spans)) + , _tensor(t) + { + _extents = detail::to_extents(_spans); + } + + tensor_core(tensor_core&& v) + : tensor_expression_type{} + , _spans (std::move(v._spans)) + , _extents(std::move(v._extents)) + , _strides(std::move(v._strides)) + , _tensor (v._tensor) + { + _extents = detail::to_extents(_spans); + } + + /// @brief Default destructor + ~tensor_core() = default; + + /** @brief Evaluates the tensor_expression and assigns the results to the + * tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must + * conform with this tensor_core. + * + * @param expr expression that is evaluated. 
+ */ + template + tensor_core& operator=(const tensor_expression_type& expr) + { + detail::eval(*this, expr); + return *this; + } + + /** @brief Evaluates the tensor_expression and assigns the results to the + * tensor_core + * + * @code A = B + C * 2; @endcode + * + * @note rank and dimension extents of the tensors in the expressions must + * conform with this tensor_core. + * + * @param expr expression that is evaluated. + */ + template + tensor_core& operator=(const parent_tensor_expression_type& expr) + { + detail::eval(*this, expr); + return *this; + } + + // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) + tensor_core& operator=(tensor_core other) noexcept + { + swap (*this, other); + return *this; + } + + tensor_core& operator=(const_reference v) + { + for(auto i = 0u; i < this->size(); ++i) + this->at(i) = v; + return *this; + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline const_reference at(I1 i1, I2 i2, Is... is) const + { + static_assert (sizeof...(is)+2 == std::tuple_size_v); + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); + return _tensor.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A.at(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline reference at(I1 i1, I2 i2, Is... 
is) + { + static_assert (sizeof...(Is)+2 == std::tuple_size_v); + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); + return _tensor.at(idx); + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline const_reference operator()(Is... is) const + { + return this->at(is...); + } + + /** @brief Element access using a multi-index with bound checking which can + * throw an exception. + * + * @code auto a = A(i,j,k); @endcode + * + * @param i zero-based index where 0 <= i < this->size() if sizeof...(is) == + * 0, else 0<= i < this->size(0) + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template [[nodiscard]] inline reference operator()(Is... is) + { + return this->at(is...); + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline const_reference operator[](size_type i) const + { + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor[idx]; + } + + /** @brief Element access using a single index. + * + * @code auto a = A[i]; @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + [[nodiscard]] inline reference operator[](size_type i) + { + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor[idx]; + } + + /** @brief Element access using a single-index with bound checking which can + * throw an exception. 
+ * + * @code auto a = A.at(i); @endcode + * + * @param i zero-based index where 0 <= i < this->size() + */ + template + [[nodiscard]] inline const_reference at(size_type i) const + { + + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor.at(idx); + } + + /** @brief Read tensor element of a tensor \c t with a single-index \c i + * + * @code auto a = t.at(i); @endcode + * + * @param i zero-based index where 0 <= i < t.size() + */ + [[nodiscard]] inline reference at(size_type i) + { + const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); + return this->_tensor.at(idx); + } + + /** @brief Generates a tensor_core index for tensor_core contraction + * + * + * @code auto Ai = A(_i,_j,k); @endcode + * + * @param i placeholder + * @param is zero-based indices where 0 <= is[r] < this->size(r) where 0 < r + * < this->rank() + */ + template + [[nodiscard]] inline decltype(auto) operator()(index::index_type p, index_types... ps) const + { + constexpr auto size = sizeof...(ps) + 1; + static_assert(size == std::tuple_size_v); + return std::make_pair(std::cref(*this), std::make_tuple(p, std::forward(ps)...)); + } + + /** + * @brief Generates a subtensor from a tensor + * + * @code auto Ai = A(span(), span(1,end), span(1,end)); @endcode + * + * @tparam f + * @tparam spans + */ + template + [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) const noexcept + { + return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + } + + template + [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... 
spans) noexcept + { + return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + } + +// [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } +// [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } +// [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } +// [[nodiscard]] inline auto end () noexcept -> iterator { return _container.end (); } +// [[nodiscard]] inline auto cbegin () const noexcept -> const_iterator { return _container.cbegin (); } +// [[nodiscard]] inline auto cend () const noexcept -> const_iterator { return _container.cend (); } +// [[nodiscard]] inline auto crbegin() const noexcept -> const_reverse_iterator { return _container.crbegin(); } +// [[nodiscard]] inline auto crend () const noexcept -> const_reverse_iterator { return _container.crend (); } +// [[nodiscard]] inline auto rbegin () const noexcept -> const_reverse_iterator { return _container.rbegin (); } +// [[nodiscard]] inline auto rend () const noexcept -> const_reverse_iterator { return _container.rend (); } +// [[nodiscard]] inline auto rbegin () noexcept -> reverse_iterator { return _container.rbegin (); } +// [[nodiscard]] inline auto rend () noexcept -> reverse_iterator { return _container.rend (); } + + [[nodiscard]] inline auto empty () const noexcept { return size() == 0; } + [[nodiscard]] inline auto size () const noexcept { return ublas::product(_extents);} + [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } + [[nodiscard]] inline auto rank () const { return std::tuple_size_v; } + [[nodiscard]] inline auto order () const { return this->rank(); } + + [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return 
_tensor.data() + detail::to_offset(_tensor.strides(), _spans);} + [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} + [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } + +private: + /** + * @brief There might be cases where spans cannot be computed on creation + */ + std::vector _spans; + extents_type _extents; + strides_type _strides; + tensor_type& _tensor; +}; + +} // namespace boost::numeric::ublas + +#endif diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index e25427cbe..a90deac21 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -28,7 +28,7 @@ #include "../tags.hpp" #include "../concepts.hpp" #include "../span.hpp" -#include "subtensor.hpp" +#include "subtensor_dynamic.hpp" #include "tensor_engine.hpp" diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index 152d09ba5..7d75dadce 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -28,7 +28,7 @@ #include "../tags.hpp" #include "../concepts.hpp" #include "../span.hpp" -#include "subtensor.hpp" +#include "subtensor_static_rank.hpp" #include "tensor_engine.hpp" #include "tensor_engine.hpp" @@ -424,7 +424,7 @@ template */ template [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) const noexcept { - static_assert(sizeof...(spans)+1 == std::tuple_size_v); + static_assert(sizeof...(spans)+1 == std::tuple_size_v); return subtensor_type(*this, std::forward(s), std::forward(spans)...); } diff --git a/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp b/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp deleted file mode 100644 index 158e11d69..000000000 --- a/include/boost/numeric/ublas/tensor/traits/slice_traits.hpp +++ /dev/null @@ -1,54 +0,0 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google -// - -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_TYPE_TRAITS_SLICE_HPP_ - -#ifdef 0 - -#include -#include -#include - -namespace boost::numeric::ublas::experimental { - - template - struct basic_slice; - - template - struct is_slice : std::false_type{}; - - template - inline static constexpr auto const is_slice_v = is_slice::value; - -} // namespace boost::numeric::ublas::span - -namespace boost::numeric::ublas::experimental { - - template - struct is_slice< basic_slice > : std::true_type{}; - -} // namespace boost::numeric::ublas::span - -namespace boost::numeric::ublas{ - - template - struct is_dynamic< experimental::basic_slice > : std::true_type{}; - - template - struct is_static< experimental::basic_slice > : std::true_type{}; - -} // namespace boost::numeric::ublas - -#endif - -#endif diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 6b06bd131..03bbb0c7e 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,40 +32,40 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run test_access.cpp - test_algorithms.cpp - 
test_einstein_notation.cpp - test_expression.cpp - test_expression_evaluation.cpp - test_extents_dynamic.cpp - test_extents_dynamic_rank_static.cpp - test_extents_functions.cpp - test_fixed_rank_expression_evaluation.cpp - test_fixed_rank_extents.cpp - test_fixed_rank_functions.cpp - test_fixed_rank_operators_arithmetic.cpp - test_fixed_rank_operators_comparison.cpp - test_fixed_rank_strides.cpp - test_fixed_rank_tensor.cpp - test_fixed_rank_tensor_matrix_vector.cpp - test_functions.cpp - test_multi_index.cpp - test_multi_index_utility.cpp - test_multiplication.cpp - test_operators_arithmetic.cpp - test_operators_comparison.cpp - test_static_expression_evaluation.cpp - test_static_extents.cpp - test_static_operators_arithmetic.cpp - test_static_operators_comparison.cpp - test_static_strides.cpp - test_static_tensor.cpp - test_static_tensor_matrix_vector.cpp - test_strides.cpp - # test_subtensor.cpp + [ run # test_access.cpp + # test_algorithms.cpp + # test_einstein_notation.cpp + # test_expression.cpp + # test_expression_evaluation.cpp + # test_extents_dynamic.cpp + # test_extents_dynamic_rank_static.cpp + # test_extents_functions.cpp + # test_fixed_rank_expression_evaluation.cpp + # test_fixed_rank_extents.cpp + # test_fixed_rank_functions.cpp + # test_fixed_rank_operators_arithmetic.cpp + # test_fixed_rank_operators_comparison.cpp + # test_fixed_rank_strides.cpp + # test_fixed_rank_tensor.cpp + # test_fixed_rank_tensor_matrix_vector.cpp + # test_functions.cpp + # test_multi_index.cpp + # test_multi_index_utility.cpp + # test_multiplication.cpp + # test_operators_arithmetic.cpp + # test_operators_comparison.cpp + # test_static_expression_evaluation.cpp + # test_static_extents.cpp + # test_static_operators_arithmetic.cpp + # test_static_operators_comparison.cpp + # test_static_strides.cpp + # test_static_tensor.cpp + # test_static_tensor_matrix_vector.cpp + # test_strides.cpp + test_subtensor.cpp test_subtensor_utility.cpp test_tensor.cpp - 
test_tensor_matrix_vector.cpp + # test_tensor_matrix_vector.cpp unit_test_framework : : diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index f0d051dfa..c4c7b3e09 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -55,7 +55,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixt using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::tensor_core>; + using subtensor_type = ublas::subtensor; auto check = [](auto const& e) { @@ -87,7 +87,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::tensor_core>; + using subtensor_type = ublas::subtensor; using span = ublas::span; @@ -261,9 +261,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f } -#if 0 -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_types, fixture_shape ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_copy_ctor_layout, value, test_types, fixture_shape ) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -294,7 +293,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_ctor_layout, value, test_typ } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_copy_move_ctor, value, test_types, fixture ) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -324,7 +323,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_copy_move_ctor, value, test_types } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_types, fixture ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_subtensor_ctor_extents_init, value, test_types, fixture ) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -347,7 +346,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_init, value, test_ty -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_ctor_extents_array, value, test_types, fixture) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -373,7 +372,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_ctor_extents_array, value, test_t -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_single_index_access, value, test_types, fixture) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -395,7 +394,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_single_index_access, va -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_multi_index_access_at, value, test_types, fixture) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -483,7 +482,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_read_write_multi_index_access_at, -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_reshape, value, test_types, fixture) { using namespace boost::numeric; using value_type = typename value::first_type; @@ -519,7 +518,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_reshape, value, test_types, fixtu -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_swap, value, test_types, fixture) { using namespace boost::numeric; using value_type = typename 
value::first_type; @@ -550,38 +549,10 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_swap, value, test_types, fixture) BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); BOOST_CHECK ( r.extents() == e_t ); - } } } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_standard_iterator, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - - for(auto const& e : extents) - { - auto v = value_type {} + value_type{1}; - auto t = tensor_type{e, v}; - - BOOST_CHECK_EQUAL( std::distance(t.begin(), t.end ()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.rbegin(), t.rend()), t.size() ); - - BOOST_CHECK_EQUAL( std::distance(t.cbegin(), t.cend ()), t.size() ); - BOOST_CHECK_EQUAL( std::distance(t.crbegin(), t.crend()), t.size() ); - - if(t.size() > 0) { - BOOST_CHECK( t.data() == std::addressof( *t.begin () ) ) ; - BOOST_CHECK( t.data() == std::addressof( *t.cbegin() ) ) ; - } - } -} - -#endif - BOOST_AUTO_TEST_SUITE_END() From 62d241f1bd6ad8632ffe85d34995f5d090290584 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Thu, 19 Aug 2021 23:20:50 +0530 Subject: [PATCH 23/40] Minor Fixes --- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 2 +- .../tensor/tensor/subtensor_static_rank.hpp | 2 +- test/tensor/Jamfile | 62 +++++++++---------- 3 files changed, 33 insertions(+), 33 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index d7e549a4b..eaad114f6 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -7,7 +7,7 @@ //: -/// \file subtensor.hpp Definition for the subtensor template class +/// \file subtensor_dynamic.hpp Definition for the subtensor template class #ifndef BOOST_UBLAS_SUBTENSOR_DYNAMIC_HPP #define 
BOOST_UBLAS_SUBTENSOR_DYNAMIC_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index 1dfbe0b32..6c36aba82 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -7,7 +7,7 @@ //: -/// \file subtensor.hpp Definition for the subtensor template class +/// \file subtensor_static_rank.hpp Definition for the subtensor template class #ifndef BOOST_UBLAS_SUBTENSOR_STATIC_RANK_HPP #define BOOST_UBLAS_SUBTENSOR_STATIC_RANK_HPP diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 03bbb0c7e..edf082ae7 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,40 +32,40 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run # test_access.cpp - # test_algorithms.cpp - # test_einstein_notation.cpp - # test_expression.cpp - # test_expression_evaluation.cpp - # test_extents_dynamic.cpp - # test_extents_dynamic_rank_static.cpp - # test_extents_functions.cpp - # test_fixed_rank_expression_evaluation.cpp - # test_fixed_rank_extents.cpp - # test_fixed_rank_functions.cpp - # test_fixed_rank_operators_arithmetic.cpp - # test_fixed_rank_operators_comparison.cpp - # test_fixed_rank_strides.cpp - # test_fixed_rank_tensor.cpp - # test_fixed_rank_tensor_matrix_vector.cpp - # test_functions.cpp - # test_multi_index.cpp - # test_multi_index_utility.cpp - # test_multiplication.cpp - # test_operators_arithmetic.cpp - # test_operators_comparison.cpp - # test_static_expression_evaluation.cpp - # test_static_extents.cpp - # test_static_operators_arithmetic.cpp - # test_static_operators_comparison.cpp - # test_static_strides.cpp - # test_static_tensor.cpp - # test_static_tensor_matrix_vector.cpp - # test_strides.cpp + [ run test_access.cpp + test_algorithms.cpp + test_einstein_notation.cpp + test_expression.cpp + test_expression_evaluation.cpp + 
test_extents_dynamic.cpp + test_extents_dynamic_rank_static.cpp + test_extents_functions.cpp + test_fixed_rank_expression_evaluation.cpp + test_fixed_rank_extents.cpp + test_fixed_rank_functions.cpp + test_fixed_rank_operators_arithmetic.cpp + test_fixed_rank_operators_comparison.cpp + test_fixed_rank_strides.cpp + test_fixed_rank_tensor.cpp + test_fixed_rank_tensor_matrix_vector.cpp + test_functions.cpp + test_multi_index.cpp + test_multi_index_utility.cpp + test_multiplication.cpp + test_operators_arithmetic.cpp + test_operators_comparison.cpp + test_static_expression_evaluation.cpp + test_static_extents.cpp + test_static_operators_arithmetic.cpp + test_static_operators_comparison.cpp + test_static_strides.cpp + test_static_tensor.cpp + test_static_tensor_matrix_vector.cpp + test_strides.cpp test_subtensor.cpp test_subtensor_utility.cpp test_tensor.cpp - # test_tensor_matrix_vector.cpp + test_tensor_matrix_vector.cpp unit_test_framework : : From a892f6d9028a7c7c896d42a1a32efbc6641eab58 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Fri, 20 Aug 2021 12:59:37 +0530 Subject: [PATCH 24/40] Minor fixes --- examples/tensor/Jamfile | 14 +- examples/tensor/access_subtensor.cpp | 157 +++++++++--------- include/boost/numeric/ublas/tensor/access.hpp | 4 +- include/boost/numeric/ublas/tensor/span.hpp | 9 + .../ublas/tensor/tensor/subtensor_dynamic.hpp | 88 +++++++--- .../tensor/tensor/subtensor_static_rank.hpp | 7 +- .../ublas/tensor/tensor/tensor_dynamic.hpp | 20 ++- .../tensor/tensor/tensor_static_rank.hpp | 4 +- 8 files changed, 181 insertions(+), 122 deletions(-) diff --git a/examples/tensor/Jamfile b/examples/tensor/Jamfile index d8dcfcfff..5486e4fbd 100644 --- a/examples/tensor/Jamfile +++ b/examples/tensor/Jamfile @@ -19,11 +19,11 @@ project boost-ublas-tensor-example [ requires cxx17_if_constexpr ] ; -exe access_tensor : access_tensor.cpp ; -exe simple_expressions : simple_expressions.cpp ; -exe multiply_tensors_product_function : 
multiply_tensors_product_function.cpp ; -exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; -exe instantiate_tensor : instantiate_tensor.cpp ; -exe expressions_subtensor : expressions_subtensor.cpp ; -exe instantiate_subtensor : instantiate_subtensor.cpp ; +# exe access_tensor : access_tensor.cpp ; +# exe simple_expressions : simple_expressions.cpp ; +# exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; +# exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; +# exe instantiate_tensor : instantiate_tensor.cpp ; +# exe expressions_subtensor : expressions_subtensor.cpp ; +# exe instantiate_subtensor : instantiate_subtensor.cpp ; exe access_subtensor : access_subtensor.cpp ; diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index de316085b..c679ae0ff 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -11,7 +11,7 @@ // #include -#include +// #include #include @@ -25,7 +25,7 @@ int main() using layout = ublas::layout::first_order; // storage format using tensor = ublas::tensor_dynamic; using span = ublas::span<>; -// constexpr auto ones = ublas::ones{}; + constexpr auto ones = ublas::ones{}; constexpr auto zeros = ublas::zeros{}; @@ -33,90 +33,93 @@ int main() // tensor A stores single-precision floating-point number according // to the first-order storage format - tensor t1 = zeros(3,4,2); - auto A = t1(span(), span(), span()); + tensor t1 = ones(3,2); + auto A = t1(span(1,2), span()); + tensor t2 = ones(2,2); - // initializes the tensor with increasing values along the first-index - // using a single index. 
- auto vf = 1.0f; - for(auto i = 0u; i < A.size(); ++i, vf += 1.0f) - A[i] = vf; + t1(0,0) = t1(1,1) = 2; - tensor t2 = A; + for(auto i = 0u; i < A.size(); ++i) + std::cout << A[i] << " "; - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "A=" << A << ";" << std::endl << std::endl; - std::cout << "t1=" << t1 << ";" << std::endl << std::endl; - } catch (const std::exception& e) { - std::cerr << "Cought exception " << e.what(); - std::cerr << "in the main function of access-tensor." << std::endl; - } - - - try { - using value = std::complex; - using layout = ublas::layout::last_order; // storage format - using tensor = ublas::tensor_dynamic; - using shape = typename tensor::extents_type; - using span = ublas::span<>; - constexpr auto zeros = ublas::zeros{}; - - - // creates a four-dimensional tensor with extents 5,4,3 and 2 - // tensor A stores complex floating-point extended double precision numbers - // according to the last-order storage format - // and initializes it with the default value. + std::cout << std::endl << std::endl; - //NOLINTNEXTLINE - tensor t1 = zeros(5,4,3,2); - auto B = t1(span(), span(), span(), span()); - - // initializes the tensor with increasing values along the last-index - // using a single-index - auto vc = value(0,0); - for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) - B[i] = vc; + tensor t3 = t1 * A; // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "B=" << B << ";" << std::endl << std::endl; - - auto C = tensor(B.extents()); - // computes the complex conjugate of elements of B - // using multi-index notation. 
- for(auto i = 0u; i < B.size(0); ++i) - for(auto j = 0u; j < B.size(1); ++j) - for(auto k = 0u; k < B.size(2); ++k) - for(auto l = 0u; l < B.size(3); ++l) - C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); - - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "C=" << C << ";" << std::endl << std::endl; - - - // // computes the complex conjugate of elements of B - // // using iterators. - auto D = tensor(B.extents()); - // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "D=" << D << ";" << std::endl << std::endl; - - // reshaping tensors. - auto new_extents = B.extents().base(); - std::next_permutation( new_extents.begin(), new_extents.end() ); - auto E = reshape( D, shape(new_extents) ); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "E=" << E << ";" << std::endl << std::endl; - - + std::cout << "t1=" << A << ";" << std::endl << std::endl; + std::cout << "t2=" << t2 << ";" << std::endl << std::endl; + std::cout << "t3=" << t3 << ";" << std::endl << std::endl; } catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); - std::cerr << "in the main function of access-tensor." << std::endl; + std::cerr << " in the main function of access-tensor." 
<< std::endl; } + + + // try { + // using value = std::complex; + // using layout = ublas::layout::last_order; // storage format + // using tensor = ublas::tensor_dynamic; + // using shape = typename tensor::extents_type; + // using span = ublas::span<>; + // constexpr auto zeros = ublas::zeros{}; + + + // // creates a four-dimensional tensor with extents 5,4,3 and 2 + // // tensor A stores complex floating-point extended double precision numbers + // // according to the last-order storage format + // // and initializes it with the default value. + + // //NOLINTNEXTLINE + // tensor t1 = zeros(5,4,3,2); + // auto B = t1(span(), span(), span(), span()); + + // // initializes the tensor with increasing values along the last-index + // // using a single-index + // auto vc = value(0,0); + // for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) + // B[i] = vc; + + // // formatted output + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "B=" << B << ";" << std::endl << std::endl; + + // auto C = tensor(B.extents()); + // // computes the complex conjugate of elements of B + // // using multi-index notation. + // for(auto i = 0u; i < B.size(0); ++i) + // for(auto j = 0u; j < B.size(1); ++j) + // for(auto k = 0u; k < B.size(2); ++k) + // for(auto l = 0u; l < B.size(3); ++l) + // C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); + + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "C=" << C << ";" << std::endl << std::endl; + + + // // // computes the complex conjugate of elements of B + // // // using iterators. 
+ // auto D = tensor(B.extents()); + // // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + // // std::cout << "% --------------------------- " << std::endl; + // // std::cout << "% --------------------------- " << std::endl << std::endl; + // // std::cout << "D=" << D << ";" << std::endl << std::endl; + + // // reshaping tensors. + // auto new_extents = B.extents().base(); + // std::next_permutation( new_extents.begin(), new_extents.end() ); + // auto E = reshape( D, shape(new_extents) ); + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "E=" << E << ";" << std::endl << std::endl; + + + // } catch (const std::exception& e) { + // std::cerr << "Cought exception " << e.what(); + // std::cerr << "in the main function of access-tensor." << std::endl; + // } } diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index 525282fd9..98d81de4f 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -184,9 +184,9 @@ constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /* * @param v begin input iterator of a container with subtensor strides of length std::distance(w,wp) or greater */ template -constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 wp, InputIt2 v) +constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 wp, InputIt2 v, std::size_t offset) { - return std::inner_product(w,wp,v,0ul, + return std::inner_product(w,wp,v,offset, std::plus<>{}, [&jv](auto ww, auto vv) { auto k=jv/vv; jv-=vv*k; return ww*k; } ); diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 8bc36d0f7..4b195f068 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ 
b/include/boost/numeric/ublas/tensor/span.hpp @@ -109,6 +109,15 @@ class span return (last_-first_) / step_ + value_type(1); } + inline span operator()(const span &rhs) const + { + auto const& lhs = *this; + return span( + rhs.first_*lhs.step_ + lhs.first_, + lhs.step_ *rhs.step_, + rhs.last_ *lhs.step_ + lhs.first_ ); + } + protected: value_type first_, step_, last_ ; diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index eaad114f6..f596e6af1 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -26,9 +26,8 @@ #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "../subtensor_utility.hpp" -#include "../tensor.hpp" #include "subtensor_engine.hpp" -#include "tensor_engine.hpp" +#include "tensor_dynamic.hpp" #include @@ -38,7 +37,8 @@ template class tensor_core>> : public detail::tensor_expression< tensor_core>>, - tensor_core>>> { + tensor_core>> + > { public: using tensor_type = tensor_dynamic; using engine_type = subtensor_engine; @@ -103,6 +103,8 @@ class tensor_core>> , _spans() , _extents(t.extents()) , _strides(t.strides()) + , _span_strides(t.strides()) + , _offset(size_type(0)) , _tensor(t) { } @@ -112,10 +114,22 @@ class tensor_core>> : tensor_expression_type{} , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} - , _strides(detail::to_span_strides(t.strides(), _spans)) + , _strides(ublas::to_strides(_extents,layout_type{})) + , _span_strides(detail::to_span_strides(t.strides(), _spans)) + , _offset{detail::to_offset(t.strides(), _spans)} , _tensor(t) { _extents = detail::to_extents(_spans); + for (int i = 0; i < (int) _extents.size(); i++) { + std::cout << _extents[i] << " "; + } + std::cout << std::endl; + for (int i = 0; i < (int) _span_strides.size(); i++) { + std::cout << 
_span_strides[i] << " "; + } + std::cout << std::endl; + std::cout << detail::to_offset(t.strides(), _spans) << std::endl; + std::cout << std::endl; } tensor_core(tensor_core&& v) @@ -123,7 +137,9 @@ class tensor_core>> , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) - , _tensor (v._tensor) + , _span_strides(std::move(v._span_strides)) + , _offset(std::move(v._offset)) + , _tensor(std::move(v._tensor)) { _extents = detail::to_extents(_spans); } @@ -168,6 +184,7 @@ class tensor_core>> // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { + // TODO implement swap swap (*this, other); return *this; } @@ -198,8 +215,8 @@ class tensor_core>> "Cannot access tensor with multi-index. " "Number of provided indices does not match with tensor order."); } - const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor.at(idx); + const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + return _tensor[idx + _offset]; } /** @brief Element access using a multi-index with bound checking which can @@ -221,8 +238,8 @@ class tensor_core>> "Cannot access tensor with multi-index." 
"Number of provided indices does not match with tensor order."); } - const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor.at(idx); + const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + return _tensor[idx + _offset]; } /** @brief Element access using a multi-index with bound checking which can @@ -264,8 +281,8 @@ class tensor_core>> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor[idx]; + const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + return _tensor[idx]; } /** @brief Element access using a single index. @@ -276,8 +293,9 @@ class tensor_core>> */ [[nodiscard]] inline reference operator[](size_type i) { - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor[idx]; + const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + std::cout << "idx:" << i << "->" << idx << std::endl; + return _tensor[idx]; } /** @brief Element access using a single-index with bound checking which can @@ -290,9 +308,8 @@ class tensor_core>> template [[nodiscard]] inline const_reference at(size_type i) const { - - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor.at(idx); + const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + return _tensor[idx]; } /** @brief Read tensor element of a tensor \c t with a single-index \c i @@ -303,8 +320,8 @@ class tensor_core>> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), 
_tensor.strides().end(), _strides.begin()); - return this->_tensor.at(idx); + const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + return _tensor[idx]; } /** @brief Generates a tensor_core index for tensor_core contraction @@ -322,7 +339,7 @@ class tensor_core>> constexpr auto size = sizeof...(ps) + 1; if (size != this->order()) { throw std::invalid_argument( - "boost::numeric::ublas::tensor_core : " + "boost::numeric::ublas::tensor_core : " "Cannot multiply using Einstein notation. " "Number of provided indices does not match with tensor order."); } @@ -340,13 +357,27 @@ class tensor_core>> template [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) const noexcept { - return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + constexpr auto size = sizeof...(spans)+1; + if(size != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core : " + "Cannot create subtensor " + "Number of provided indices does not match with tensor order."); + } + // TODO find way to convert spans + return subtensor_type(_tensor, std::forward(s), std::forward(spans)...); } template [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... 
spans) noexcept { - return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + constexpr auto size = sizeof...(spans)+1; + if(size != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core : " + "Cannot create subtensor " + "Number of provided indices does not match with tensor order."); + } + // TODO find way to convert spans + return subtensor_type(_tensor, std::forward(s), std::forward(spans)...); } // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } @@ -370,18 +401,21 @@ class tensor_core>> [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), spans_);} - [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), spans_);} - [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + _offset; } + [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + _offset; } + // [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: + /** * @brief There might be cases where spans cannot be computed on creation */ std::vector _spans; - extents_type _extents; - strides_type _strides; - tensor_type& _tensor; + extents_type _extents; + strides_type _strides; + strides_type _span_strides; + std::size_t _offset; + tensor_type& _tensor; }; template diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index 6c36aba82..d2dfa82a1 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp 
+++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -26,8 +26,9 @@ #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "../subtensor_utility.hpp" + #include "subtensor_engine.hpp" -#include "tensor_engine.hpp" +#include "tensor_static_rank.hpp" #include @@ -357,8 +358,8 @@ class tensor_core>> [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), spans_);} - [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), spans_);} + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} + [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index a90deac21..63b0bee0d 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -28,7 +28,7 @@ #include "../tags.hpp" #include "../concepts.hpp" #include "../span.hpp" -#include "subtensor_dynamic.hpp" +#include "subtensor_engine.hpp" #include "tensor_engine.hpp" @@ -466,12 +466,24 @@ template * @tparam spans */ template - [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) const noexcept { - return subtensor_type(*this, std::forward(s), std::forward(spans)...); + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) const { + constexpr auto size = sizeof...(spans)+1; + if(size != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core : " + "Cannot create subtensor " + "Number of provided indices does not match with tensor order."); + } + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } template - [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) noexcept { + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) { + constexpr auto size = sizeof...(spans)+1; + if(size != this->order()){ + throw std::invalid_argument("boost::numeric::ublas::tensor_core : " + "Cannot create subtensor " + "Number of provided indices does not match with tensor order."); + } return subtensor_type(*this, std::forward(s), std::forward(spans)...); } diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index 7d75dadce..93391f115 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -28,13 +28,13 @@ #include "../tags.hpp" #include "../concepts.hpp" #include "../span.hpp" -#include "subtensor_static_rank.hpp" -#include "tensor_engine.hpp" +#include "subtensor_engine.hpp" #include "tensor_engine.hpp" + namespace boost::numeric::ublas { template From faf1acca445a562bcbeb9614b975bbeb619dbf9c Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Fri, 20 Aug 2021 13:46:40 +0530 Subject: [PATCH 25/40] Fix indexing --- examples/tensor/access_subtensor.cpp | 18 +++++++++++------- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 19 ++++++++++++------- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index c679ae0ff..b25e2fa94 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -33,23 +33,27 
@@ int main() // tensor A stores single-precision floating-point number according // to the first-order storage format - tensor t1 = ones(3,2); - auto A = t1(span(1,2), span()); + tensor t1 = ones(4,3); + auto A = t1(span(1,2,3), span(1,2)); tensor t2 = ones(2,2); t1(0,0) = t1(1,1) = 2; - for(auto i = 0u; i < A.size(); ++i) - std::cout << A[i] << " "; - + std::cout << "hello" << std::endl; + for (auto i = 0u; i < A.size(0); i++) { + for (auto j = 0u; j < A.size(1); j++) + std::cout << A(i,j) << " "; + std::cout << std::endl; + } std::cout << std::endl << std::endl; - tensor t3 = t1 * A; + tensor t3 = t2 * A; // formatted output std::cout << "% --------------------------- " << std::endl; std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "t1=" << A << ";" << std::endl << std::endl; + std::cout << "t1=" << t1 << ";" << std::endl << std::endl; + std::cout << "A=" << A << ";" << std::endl << std::endl; std::cout << "t2=" << t2 << ";" << std::endl << std::endl; std::cout << "t3=" << t3 << ";" << std::endl << std::endl; } catch (const std::exception& e) { diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index f596e6af1..f66018107 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -114,12 +114,13 @@ class tensor_core>> : tensor_expression_type{} , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} - , _strides(ublas::to_strides(_extents,layout_type{})) + , _strides{} , _span_strides(detail::to_span_strides(t.strides(), _spans)) , _offset{detail::to_offset(t.strides(), _spans)} , _tensor(t) { _extents = detail::to_extents(_spans); + _strides = ublas::to_strides(_extents,layout_type{}); for (int i = 0; i < (int) _extents.size(); i++) { std::cout << _extents[i] << " "; } @@ -128,8 +129,11 @@ 
class tensor_core>> std::cout << _span_strides[i] << " "; } std::cout << std::endl; - std::cout << detail::to_offset(t.strides(), _spans) << std::endl; + for (int i = 0; i < (int) _strides.size(); i++) { + std::cout << _strides[i] << " "; + } std::cout << std::endl; + std::cout << _offset << std::endl; } tensor_core(tensor_core&& v) @@ -281,7 +285,7 @@ class tensor_core>> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); return _tensor[idx]; } @@ -293,8 +297,9 @@ class tensor_core>> */ [[nodiscard]] inline reference operator[](size_type i) { - const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); - std::cout << "idx:" << i << "->" << idx << std::endl; + std::cout << "idx:" << i; + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + std::cout << "->" << idx << std::endl; return _tensor[idx]; } @@ -308,7 +313,7 @@ class tensor_core>> template [[nodiscard]] inline const_reference at(size_type i) const { - const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); return _tensor[idx]; } @@ -320,7 +325,7 @@ class tensor_core>> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _span_strides.begin(), _span_strides.end(), _strides.begin(), _offset); + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); return _tensor[idx]; } From 
22eed7c6e15685739638868a86d921d4ae16de9e Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Fri, 20 Aug 2021 15:16:23 +0530 Subject: [PATCH 26/40] Improve example --- examples/tensor/access_subtensor.cpp | 32 +++++++++++++++++----------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index b25e2fa94..7563a6663 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -33,20 +33,28 @@ int main() // tensor A stores single-precision floating-point number according // to the first-order storage format - tensor t1 = ones(4,3); - auto A = t1(span(1,2,3), span(1,2)); - tensor t2 = ones(2,2); - - t1(0,0) = t1(1,1) = 2; - - std::cout << "hello" << std::endl; - for (auto i = 0u; i < A.size(0); i++) { - for (auto j = 0u; j < A.size(1); j++) - std::cout << A(i,j) << " "; - std::cout << std::endl; + tensor t1 = ones(3,3,2); + int cnt = 0; + for (auto i = 0u; i < t1.size(0); i++) { + for (auto j = 0u; j < t1.size(1); j++) { + for (auto k = 0u; k < t1.size(2); k++) { + t1(i,j,k) = cnt++; + } + } } - std::cout << std::endl << std::endl; + auto A = t1(span(0,1), span(0,1), span()); + tensor t2 = ones(2,2,2); + std::cout << "% --------------------------- " << std::endl; + std::cout << A(0,0,0) << std::endl; + std::cout << A(0,0,1) << std::endl; + std::cout << A(0,1,0) << std::endl; + std::cout << A(0,1,1) << std::endl; + std::cout << A(1,0,0) << std::endl; + std::cout << A(1,0,1) << std::endl; + std::cout << A(1,1,0) << std::endl; + std::cout << A(1,1,1) << std::endl; + std::cout << "% --------------------------- " << std::endl; tensor t3 = t2 * A; // formatted output From 517e19902f4f1f5f048473f8afaf2f1837e356dc Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Fri, 20 Aug 2021 15:55:52 +0530 Subject: [PATCH 27/40] fix ostream operator --- .../boost/numeric/ublas/tensor/ostream.hpp | 35 +++++++++++++++++++ .../ublas/tensor/tensor/subtensor_dynamic.hpp | 1 
+ 2 files changed, 36 insertions(+) diff --git a/include/boost/numeric/ublas/tensor/ostream.hpp b/include/boost/numeric/ublas/tensor/ostream.hpp index 2ce7940cc..15662c0a9 100644 --- a/include/boost/numeric/ublas/tensor/ostream.hpp +++ b/include/boost/numeric/ublas/tensor/ostream.hpp @@ -84,6 +84,9 @@ namespace boost::numeric::ublas template class tensor_core; +template +struct subtensor_engine; + } //namespace boost::numeric::ublas @@ -117,4 +120,36 @@ std::ostream& operator << (std::ostream& out, class boost::numeric::ublas::tenso return out; } +template +std::ostream& operator << (std::ostream& out, + class boost::numeric::ublas::tensor_core< + boost::numeric::ublas::subtensor_engine> const& t) +{ + + namespace ublas = boost::numeric::ublas; + + auto const& n = t.extents(); + auto const& w = t.span_strides(); + + if(is_scalar(n)){ + out << '['; + ublas::detail::print(out,t[0]); + out << ']'; + } + else if(is_vector(n)) { + const auto& cat = n.at(0) > n.at(1) ? ';' : ','; + out << '['; + for(auto i = 0u; i < t.size()-1; ++i){ + ublas::detail::print(out,t[i]); + out << cat << ' '; + } + ublas::detail::print(out,t[t.size()-1]); + out << ']'; + } + else{ + boost::numeric::ublas::detail::print(out, t.rank()-1, t.data(), w.data(), n.data()); + } + return out; +} + #endif diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index f66018107..6516aae85 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -405,6 +405,7 @@ class tensor_core>> [[nodiscard]] inline auto order () const { return this->rank(); } [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } [[nodiscard]] inline auto data () 
const noexcept -> const_pointer { return _tensor.data() + _offset; } [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + _offset; } From e33255ea97c54950660fca3647669e1b69b8a0c2 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Fri, 20 Aug 2021 16:28:42 +0530 Subject: [PATCH 28/40] Improve examples --- examples/tensor/Jamfile | 14 +-- examples/tensor/access_subtensor.cpp | 134 +++++++++++++-------------- test/tensor/Jamfile | 2 +- 3 files changed, 74 insertions(+), 76 deletions(-) diff --git a/examples/tensor/Jamfile b/examples/tensor/Jamfile index 5486e4fbd..d8dcfcfff 100644 --- a/examples/tensor/Jamfile +++ b/examples/tensor/Jamfile @@ -19,11 +19,11 @@ project boost-ublas-tensor-example [ requires cxx17_if_constexpr ] ; -# exe access_tensor : access_tensor.cpp ; -# exe simple_expressions : simple_expressions.cpp ; -# exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; -# exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; -# exe instantiate_tensor : instantiate_tensor.cpp ; -# exe expressions_subtensor : expressions_subtensor.cpp ; -# exe instantiate_subtensor : instantiate_subtensor.cpp ; +exe access_tensor : access_tensor.cpp ; +exe simple_expressions : simple_expressions.cpp ; +exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; +exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; +exe instantiate_tensor : instantiate_tensor.cpp ; +exe expressions_subtensor : expressions_subtensor.cpp ; +exe instantiate_subtensor : instantiate_subtensor.cpp ; exe access_subtensor : access_subtensor.cpp ; diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index 7563a6663..d0635aa5c 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -11,7 +11,7 @@ // #include -// #include +#include #include @@ -26,7 +26,6 @@ int main() using tensor = ublas::tensor_dynamic; 
using span = ublas::span<>; constexpr auto ones = ublas::ones{}; - constexpr auto zeros = ublas::zeros{}; // creates a three-dimensional tensor with extents 3,4 and 2 @@ -42,8 +41,8 @@ int main() } } } - auto A = t1(span(0,1), span(0,1), span()); - tensor t2 = ones(2,2,2); + auto A = t1(span(1), span(0,2,2), span()); + tensor t2 = ones(1,2,2); std::cout << "% --------------------------- " << std::endl; std::cout << A(0,0,0) << std::endl; @@ -69,69 +68,68 @@ int main() std::cerr << " in the main function of access-tensor." << std::endl; } + try { + using value = std::complex; + using layout = ublas::layout::last_order; // storage format + using tensor = ublas::tensor_dynamic; + using shape = typename tensor::extents_type; + using span = ublas::span<>; + constexpr auto zeros = ublas::zeros{}; + + + // creates a four-dimensional tensor with extents 5,4,3 and 2 + // tensor A stores complex floating-point extended double precision numbers + // according to the last-order storage format + // and initializes it with the default value. + + //NOLINTNEXTLINE + tensor t1 = zeros(5,4,3,2); + auto B = t1(span(), span(), span(), span()); + + // initializes the tensor with increasing values along the last-index + // using a single-index + auto vc = value(0,0); + for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) + B[i] = vc; + + // formatted output + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "B=" << B << ";" << std::endl << std::endl; + + auto C = tensor(B.extents()); + // computes the complex conjugate of elements of B + // using multi-index notation. 
+ for(auto i = 0u; i < B.size(0); ++i) + for(auto j = 0u; j < B.size(1); ++j) + for(auto k = 0u; k < B.size(2); ++k) + for(auto l = 0u; l < B.size(3); ++l) + C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); + + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "C=" << C << ";" << std::endl << std::endl; - // try { - // using value = std::complex; - // using layout = ublas::layout::last_order; // storage format - // using tensor = ublas::tensor_dynamic; - // using shape = typename tensor::extents_type; - // using span = ublas::span<>; - // constexpr auto zeros = ublas::zeros{}; - - - // // creates a four-dimensional tensor with extents 5,4,3 and 2 - // // tensor A stores complex floating-point extended double precision numbers - // // according to the last-order storage format - // // and initializes it with the default value. - - // //NOLINTNEXTLINE - // tensor t1 = zeros(5,4,3,2); - // auto B = t1(span(), span(), span(), span()); - - // // initializes the tensor with increasing values along the last-index - // // using a single-index - // auto vc = value(0,0); - // for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) - // B[i] = vc; - - // // formatted output - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "B=" << B << ";" << std::endl << std::endl; - - // auto C = tensor(B.extents()); - // // computes the complex conjugate of elements of B - // // using multi-index notation. 
- // for(auto i = 0u; i < B.size(0); ++i) - // for(auto j = 0u; j < B.size(1); ++j) - // for(auto k = 0u; k < B.size(2); ++k) - // for(auto l = 0u; l < B.size(3); ++l) - // C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); - - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "C=" << C << ";" << std::endl << std::endl; - - - // // // computes the complex conjugate of elements of B - // // // using iterators. - // auto D = tensor(B.extents()); - // // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - // // std::cout << "% --------------------------- " << std::endl; - // // std::cout << "% --------------------------- " << std::endl << std::endl; - // // std::cout << "D=" << D << ";" << std::endl << std::endl; - - // // reshaping tensors. - // auto new_extents = B.extents().base(); - // std::next_permutation( new_extents.begin(), new_extents.end() ); - // auto E = reshape( D, shape(new_extents) ); - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "E=" << E << ";" << std::endl << std::endl; - - - // } catch (const std::exception& e) { - // std::cerr << "Cought exception " << e.what(); - // std::cerr << "in the main function of access-tensor." << std::endl; - // } + + // // computes the complex conjugate of elements of B + // // using iterators. + auto D = tensor(B.extents()); + // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "D=" << D << ";" << std::endl << std::endl; + + // reshaping tensors. 
+ auto new_extents = B.extents().base(); + std::next_permutation( new_extents.begin(), new_extents.end() ); + auto E = reshape( D, shape(new_extents) ); + std::cout << "% --------------------------- " << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "E=" << E << ";" << std::endl << std::endl; + + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << "in the main function of access-tensor." << std::endl; + } } diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index edf082ae7..a2446beb9 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -62,7 +62,7 @@ test-suite boost-ublas-tensor-test test_static_tensor.cpp test_static_tensor_matrix_vector.cpp test_strides.cpp - test_subtensor.cpp + # test_subtensor.cpp test_subtensor_utility.cpp test_tensor.cpp test_tensor_matrix_vector.cpp From aca6b23d833642fe01801ac6ece63eea6ad285a5 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Fri, 20 Aug 2021 22:20:22 +0530 Subject: [PATCH 29/40] WIP: static_rank_subtensor --- examples/tensor/Jamfile | 14 +- examples/tensor/access_subtensor.cpp | 146 +++++++++--------- .../ublas/tensor/subtensor_utility.hpp | 21 +++ .../ublas/tensor/tensor/subtensor_dynamic.hpp | 36 ++++- .../tensor/tensor/subtensor_static_rank.hpp | 70 ++++++--- .../tensor/tensor/tensor_static_rank.hpp | 2 - test/tensor/test_access.cpp | 6 +- 7 files changed, 180 insertions(+), 115 deletions(-) diff --git a/examples/tensor/Jamfile b/examples/tensor/Jamfile index d8dcfcfff..5486e4fbd 100644 --- a/examples/tensor/Jamfile +++ b/examples/tensor/Jamfile @@ -19,11 +19,11 @@ project boost-ublas-tensor-example [ requires cxx17_if_constexpr ] ; -exe access_tensor : access_tensor.cpp ; -exe simple_expressions : simple_expressions.cpp ; -exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; -exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; -exe 
instantiate_tensor : instantiate_tensor.cpp ; -exe expressions_subtensor : expressions_subtensor.cpp ; -exe instantiate_subtensor : instantiate_subtensor.cpp ; +# exe access_tensor : access_tensor.cpp ; +# exe simple_expressions : simple_expressions.cpp ; +# exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; +# exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; +# exe instantiate_tensor : instantiate_tensor.cpp ; +# exe expressions_subtensor : expressions_subtensor.cpp ; +# exe instantiate_subtensor : instantiate_subtensor.cpp ; exe access_subtensor : access_subtensor.cpp ; diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index d0635aa5c..7a59b0fc4 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -11,7 +11,7 @@ // #include -#include +// #include #include @@ -42,17 +42,15 @@ int main() } } auto A = t1(span(1), span(0,2,2), span()); + auto B = A(span(), span(), span()); + std::cout << "% --------------------------- " << std::endl; + for (auto x: B.extents().base()) { + std::cout << x << " "; + } + std::cout << std::endl; + std::cout << "% --------------------------- " << std::endl; tensor t2 = ones(1,2,2); - std::cout << "% --------------------------- " << std::endl; - std::cout << A(0,0,0) << std::endl; - std::cout << A(0,0,1) << std::endl; - std::cout << A(0,1,0) << std::endl; - std::cout << A(0,1,1) << std::endl; - std::cout << A(1,0,0) << std::endl; - std::cout << A(1,0,1) << std::endl; - std::cout << A(1,1,0) << std::endl; - std::cout << A(1,1,1) << std::endl; std::cout << "% --------------------------- " << std::endl; tensor t3 = t2 * A; @@ -68,68 +66,68 @@ int main() std::cerr << " in the main function of access-tensor." 
<< std::endl; } - try { - using value = std::complex; - using layout = ublas::layout::last_order; // storage format - using tensor = ublas::tensor_dynamic; - using shape = typename tensor::extents_type; - using span = ublas::span<>; - constexpr auto zeros = ublas::zeros{}; - - - // creates a four-dimensional tensor with extents 5,4,3 and 2 - // tensor A stores complex floating-point extended double precision numbers - // according to the last-order storage format - // and initializes it with the default value. - - //NOLINTNEXTLINE - tensor t1 = zeros(5,4,3,2); - auto B = t1(span(), span(), span(), span()); - - // initializes the tensor with increasing values along the last-index - // using a single-index - auto vc = value(0,0); - for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) - B[i] = vc; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "B=" << B << ";" << std::endl << std::endl; - - auto C = tensor(B.extents()); - // computes the complex conjugate of elements of B - // using multi-index notation. - for(auto i = 0u; i < B.size(0); ++i) - for(auto j = 0u; j < B.size(1); ++j) - for(auto k = 0u; k < B.size(2); ++k) - for(auto l = 0u; l < B.size(3); ++l) - C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); - - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "C=" << C << ";" << std::endl << std::endl; - - - // // computes the complex conjugate of elements of B - // // using iterators. - auto D = tensor(B.extents()); - // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "D=" << D << ";" << std::endl << std::endl; - - // reshaping tensors. 
- auto new_extents = B.extents().base(); - std::next_permutation( new_extents.begin(), new_extents.end() ); - auto E = reshape( D, shape(new_extents) ); - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; - std::cout << "E=" << E << ";" << std::endl << std::endl; - - - } catch (const std::exception& e) { - std::cerr << "Cought exception " << e.what(); - std::cerr << "in the main function of access-tensor." << std::endl; - } + // try { + // using value = std::complex; + // using layout = ublas::layout::last_order; // storage format + // using tensor = ublas::tensor_dynamic; + // using shape = typename tensor::extents_type; + // using span = ublas::span<>; + // constexpr auto zeros = ublas::zeros{}; + + + // // creates a four-dimensional tensor with extents 5,4,3 and 2 + // // tensor A stores complex floating-point extended double precision numbers + // // according to the last-order storage format + // // and initializes it with the default value. + + // //NOLINTNEXTLINE + // tensor t1 = zeros(5,4,3,2); + // auto B = t1(span(), span(), span(), span()); + + // // initializes the tensor with increasing values along the last-index + // // using a single-index + // auto vc = value(0,0); + // for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) + // B[i] = vc; + + // // formatted output + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "B=" << B << ";" << std::endl << std::endl; + + // auto C = tensor(B.extents()); + // // computes the complex conjugate of elements of B + // // using multi-index notation. 
+ // for(auto i = 0u; i < B.size(0); ++i) + // for(auto j = 0u; j < B.size(1); ++j) + // for(auto k = 0u; k < B.size(2); ++k) + // for(auto l = 0u; l < B.size(3); ++l) + // C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); + + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "C=" << C << ";" << std::endl << std::endl; + + + // // // computes the complex conjugate of elements of B + // // // using iterators. + // auto D = tensor(B.extents()); + // // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); + // // std::cout << "% --------------------------- " << std::endl; + // // std::cout << "% --------------------------- " << std::endl << std::endl; + // // std::cout << "D=" << D << ";" << std::endl << std::endl; + + // // reshaping tensors. + // auto new_extents = B.extents().base(); + // std::next_permutation( new_extents.begin(), new_extents.end() ); + // auto E = reshape( D, shape(new_extents) ); + // std::cout << "% --------------------------- " << std::endl; + // std::cout << "% --------------------------- " << std::endl << std::endl; + // std::cout << "E=" << E << ";" << std::endl << std::endl; + + + // } catch (const std::exception& e) { + // std::cerr << "Cought exception " << e.what(); + // std::cerr << "in the main function of access-tensor." << std::endl; + // } } diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index cfee95fd9..6efac853f 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -167,6 +167,27 @@ auto generate_span_array(extents<> const& extents, Spans&& ... spans) return span_array; } +/*! 
@brief Auxiliary function for subtensor that generates array of spans + * + * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) + * -> std::array (span(0,3), span(1,1), span(2,4),span(1,1)) + * + * @note span is zero-based indexed. + * + * @param[in] extents of the tensor + * @param[in] spans spans with which the subtensor is created + */ +template +auto generate_span_array(extents const& extents, Spans&& ... spans) +{ + constexpr static auto n = sizeof...(Spans); + static_assert(N == n); + std::array span_array; + if constexpr (n>0) + transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); + return span_array; +} + /*! @brief Auxiliary function for subtensor that generates array of spans * * generate_span_vector(shape(4,3,5,2), span(), 1, span(2,end), end ) diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index 6516aae85..11da1b7d7 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -87,6 +87,9 @@ class tensor_core>> // using const_reverse_iterator = // typename container_traits_type::const_reverse_iterator; + using matrix_type = matrix >; + using vector_type = vector >; + using container_tag = typename container_traits_type::container_tag; using resizable_tag = typename container_traits_type::resizable_tag; @@ -136,6 +139,21 @@ class tensor_core>> std::cout << _offset << std::endl; } + // TODO + // template + // tensor_core(const tensor_core& t, FS&& first, SL&&... 
spans) + // : tensor_expression_type{} + // , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) + // , _extents{} + // , _strides{} + // , _span_strides(detail::to_span_strides(t.strides(), _spans)) + // , _offset{detail::to_offset(t.strides(), _spans)} + // , _tensor(t._tensor) + // { + // _extents = detail::to_extents(_spans); + // _strides = ublas::to_strides(_extents,layout_type{}); + // } + tensor_core(tensor_core&& v) : tensor_expression_type{} , _spans (std::move(v._spans)) @@ -360,7 +378,7 @@ class tensor_core>> * @tparam spans */ template - [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) const noexcept + [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) const { constexpr auto size = sizeof...(spans)+1; if(size != this->order()){ @@ -368,12 +386,15 @@ class tensor_core>> "Cannot create subtensor " "Number of provided indices does not match with tensor order."); } - // TODO find way to convert spans - return subtensor_type(_tensor, std::forward(s), std::forward(spans)...); + size_type n = size; + auto convert = [&] (auto arg) { + return _spans[--n](arg); + }; + return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); } template - [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) noexcept + [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... 
spans) { constexpr auto size = sizeof...(spans)+1; if(size != this->order()){ @@ -381,8 +402,11 @@ class tensor_core>> "Cannot create subtensor " "Number of provided indices does not match with tensor order."); } - // TODO find way to convert spans - return subtensor_type(_tensor, std::forward(s), std::forward(spans)...); + size_type n = size; + auto convert = [&] (auto arg) { + return _spans[--n](arg); + }; + return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); } // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index d2dfa82a1..d70e54d9b 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -106,6 +106,8 @@ class tensor_core>> , _spans() , _extents(t.extents()) , _strides(t.strides()) + , _span_strides(t.strides()) + , _offset(size_type(0)) , _tensor(t) { } @@ -113,12 +115,15 @@ class tensor_core>> template tensor_core(U&& t, FS&& first, SL&&... 
spans) : tensor_expression_type{} - , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) + , _spans(detail::generate_span_array(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} - , _strides(detail::to_span_strides(t.strides(), _spans)) + , _strides{} + , _span_strides(detail::to_span_strides(t.strides(), _spans)) + , _offset{detail::to_offset(t.strides(), _spans)} , _tensor(t) { _extents = detail::to_extents(_spans); + _strides = ublas::to_strides(_extents,layout_type{}); } tensor_core(tensor_core&& v) @@ -126,7 +131,9 @@ class tensor_core>> , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) - , _tensor (v._tensor) + , _span_strides(std::move(v._span_strides)) + , _offset(std::move(v._offset)) + , _tensor(std::move(v._tensor)) { _extents = detail::to_extents(_spans); } @@ -171,6 +178,7 @@ class tensor_core>> // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { + // TODO implement swap swap (*this, other); return *this; } @@ -196,8 +204,8 @@ class tensor_core>> [[nodiscard]] inline const_reference at(I1 i1, I2 i2, Is... is) const { static_assert (sizeof...(is)+2 == std::tuple_size_v); - const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor.at(idx); + const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + return _tensor[idx + _offset]; } /** @brief Element access using a multi-index with bound checking which can @@ -214,8 +222,8 @@ class tensor_core>> [[nodiscard]] inline reference at(I1 i1, I2 i2, Is... 
is) { static_assert (sizeof...(Is)+2 == std::tuple_size_v); - const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor.at(idx); + const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + return _tensor[idx + _offset]; } /** @brief Element access using a multi-index with bound checking which can @@ -257,8 +265,8 @@ class tensor_core>> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor[idx]; + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + return _tensor[idx]; } /** @brief Element access using a single index. @@ -269,8 +277,10 @@ class tensor_core>> */ [[nodiscard]] inline reference operator[](size_type i) { - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor[idx]; + std::cout << "idx:" << i; + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + std::cout << "->" << idx << std::endl; + return _tensor[idx]; } /** @brief Element access using a single-index with bound checking which can @@ -283,9 +293,8 @@ class tensor_core>> template [[nodiscard]] inline const_reference at(size_type i) const { - - const auto idx = detail::compute_single_index(i, _tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor.at(idx); + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + return _tensor[idx]; } /** @brief Read tensor element of a tensor \c t with a single-index \c i @@ -296,8 +305,8 @@ class tensor_core>> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, 
_tensor.strides().begin(), _tensor.strides().end(), _strides.begin()); - return this->_tensor.at(idx); + const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + return _tensor[idx]; } /** @brief Generates a tensor_core index for tensor_core contraction @@ -328,13 +337,25 @@ class tensor_core>> template [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) const noexcept { - return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + constexpr auto size = sizeof...(spans)+1; + static_assert(size == this->order()); + size_type n = size; + auto convert = [&] (auto arg) { + return _spans[--n](arg); + }; + return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); } template [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) noexcept { - return subtensor_type(_tensor, _strides, std::forward(s), std::forward(spans)...); + constexpr auto size = sizeof...(spans)+1; + static_assert(size == this->order()); + size_type n = size; + auto convert = [&] (auto arg) { + return _spans[--n](arg); + }; + return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); } // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } @@ -357,19 +378,22 @@ class tensor_core>> [[nodiscard]] inline auto order () const { return this->rank(); } [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } + [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} - 
[[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } + // [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: /** * @brief There might be cases where spans cannot be computed on creation */ - std::vector _spans; - extents_type _extents; - strides_type _strides; - tensor_type& _tensor; + std::array _spans; + extents_type _extents; + strides_type _strides; + strides_type _span_strides; + std::size_t _offset; + tensor_type& _tensor; }; } // namespace boost::numeric::ublas diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index 93391f115..17d580b0c 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -33,8 +33,6 @@ #include "tensor_engine.hpp" - - namespace boost::numeric::ublas { template diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index dd0b08607..f51838d0e 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -188,7 +188,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index, layout_t, layout_t auto const& jref = std::get(index); mp::mp_for_each>( [&]( auto K ) { auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,w.begin()); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,w.begin(), 0); BOOST_CHECK(j < prodn(n)); BOOST_CHECK_EQUAL(j,jref[K]); }); @@ -212,7 +212,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_ constexpr auto r = std::get(ranks); mp::mp_for_each>( [&]( auto K ) { auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin()); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin(), 0); BOOST_CHECK(j < prodn(n)); 
BOOST_CHECK_EQUAL(j,jref[K]); }); @@ -301,7 +301,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_subtensor, layout_t, auto const& jref = index[k]; for(auto kk = 0u; kk < jref.size(); ++kk){ auto const jj = jref[kk]; - auto const j = ub::detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); + auto const j = ub::detail::compute_single_index(jj,w.begin(),w.end(),w.begin(), 0); BOOST_CHECK_EQUAL ( j, jj ) ; } } From 6a993628153000238f1b32bb85c506154cf13775 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sat, 21 Aug 2021 13:39:41 +0530 Subject: [PATCH 30/40] subtensor static rank complete --- examples/tensor/access_subtensor.cpp | 22 ++--- examples/tensor/instantiate_subtensor.cpp | 39 ++++++-- .../ublas/tensor/operators_arithmetic.hpp | 8 +- .../boost/numeric/ublas/tensor/ostream.hpp | 35 -------- .../ublas/tensor/subtensor_utility.hpp | 88 +++++++++++++++++-- include/boost/numeric/ublas/tensor/tensor.hpp | 1 + .../ublas/tensor/tensor/subtensor_dynamic.hpp | 22 ++--- .../tensor/tensor/subtensor_static_rank.hpp | 58 +++++++----- .../ublas/tensor/tensor/tensor_dynamic.hpp | 1 + .../tensor/tensor/tensor_static_rank.hpp | 23 ++++- 10 files changed, 201 insertions(+), 96 deletions(-) diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index 7a59b0fc4..a1655ee59 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -21,11 +21,11 @@ int main() namespace ublas = boost::numeric::ublas; try { - using value = float; + using value = std::complex; using layout = ublas::layout::first_order; // storage format - using tensor = ublas::tensor_dynamic; + using tensor = ublas::tensor_static_rank; using span = ublas::span<>; - constexpr auto ones = ublas::ones{}; + constexpr auto ones = ublas::ones_static_rank{}; // creates a three-dimensional tensor with extents 3,4 and 2 @@ -33,11 +33,12 @@ int main() // to the first-order storage format tensor t1 = ones(3,3,2); - int cnt = 0; + 
value cnt(0,0); for (auto i = 0u; i < t1.size(0); i++) { for (auto j = 0u; j < t1.size(1); j++) { for (auto k = 0u; k < t1.size(2); k++) { - t1(i,j,k) = cnt++; + t1(i,j,k) = cnt; + cnt+= value(1,1); } } } @@ -47,16 +48,11 @@ int main() for (auto x: B.extents().base()) { std::cout << x << " "; } - std::cout << std::endl; - std::cout << "% --------------------------- " << std::endl; tensor t2 = ones(1,2,2); + auto t3 = ublas::inner_prod(A, t2); - std::cout << "% --------------------------- " << std::endl; - tensor t3 = t2 * A; - - // formatted output - std::cout << "% --------------------------- " << std::endl; - std::cout << "% --------------------------- " << std::endl << std::endl; + // // // formatted output + // std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "t1=" << t1 << ";" << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; std::cout << "t2=" << t2 << ";" << std::endl << std::endl; diff --git a/examples/tensor/instantiate_subtensor.cpp b/examples/tensor/instantiate_subtensor.cpp index 71574bc10..a146dcce1 100644 --- a/examples/tensor/instantiate_subtensor.cpp +++ b/examples/tensor/instantiate_subtensor.cpp @@ -12,29 +12,56 @@ void instantiate_subtensor_dynamic() using span = ublas::span<>; try { - tensor t1 = ones(3, 4, 2); + tensor t1 = ones(3,3,2); std::cout << "t1 = " << t1 << std::endl; - auto st1 = t1(span(0,ublas::max), span(), span(0,1)); + auto st1 = t1(span(0,1), span(0,2), span()); std::cout << "st1 = " << st1 << std::endl; + auto st2 = st1(span(1), span(0,2,2), span()); + + std::cout << "st2 = " << st1 << std::endl; + } catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); - std::cerr << "in the instantiate_tensor_dynamic function of instantiate-tensor." << std::endl; + std::cerr << " in the instantiate_subtensor_dynamic function of instantiate-tensor." 
<< std::endl; throw; } } -void instantiate_subtensor_dynamic_with_static_order() +void instantiate_subtensor_with_static_order() { -} + namespace ublas = boost::numeric::ublas; + using value = float; + using layout = boost::numeric::ublas::layout::first_order; // storage format + using tensor = boost::numeric::ublas::tensor_static_rank; + constexpr auto ones = ublas::ones_static_rank{}; + using span = ublas::span<>; + + try { + tensor t1 = ones(3, 3, 2); + std::cout << "t1 = " << t1 << std::endl; + + auto st1 = t1(span(0,1), span(0,2,2), span()); + std::cout << "st1 = " << st1 << std::endl; + + auto st2 = st1(span(1), span(), span()); + + std::cout << "st2 = " << st1 << std::endl; + + } catch (const std::exception& e) { + std::cerr << "Cought exception " << e.what(); + std::cerr << " in the instantiate_subtensor_with_static_order of instantiate-tensor." << std::endl; + throw; + } +} int main() { try{ instantiate_subtensor_dynamic(); - instantiate_subtensor_dynamic_with_static_order(); + instantiate_subtensor_with_static_order(); } catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); std::cerr << "in the main function of instantiate-tensor." 
<< std::endl; diff --git a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp index fc4492a0b..2db42f021 100644 --- a/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp +++ b/include/boost/numeric/ublas/tensor/operators_arithmetic.hpp @@ -396,7 +396,7 @@ inline template inline constexpr auto& operator += (boost::numeric::ublas::tensor_core& lhs, - const boost::numeric::ublas::detail::tensor_expression>,D> &expr) + const boost::numeric::ublas::detail::tensor_expression &expr) { boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l+=r; } ); return lhs; @@ -405,7 +405,7 @@ inline template inline constexpr auto& operator -= (boost::numeric::ublas::tensor_core& lhs, - const boost::numeric::ublas::detail::tensor_expression>,D> &expr) + const boost::numeric::ublas::detail::tensor_expression &expr) { boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l-=r; } ); return lhs; @@ -414,7 +414,7 @@ inline template inline constexpr auto& operator *= (boost::numeric::ublas::tensor_core& lhs, - const boost::numeric::ublas::detail::tensor_expression>,D> &expr) + const boost::numeric::ublas::detail::tensor_expression &expr) { boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l*=r; } ); return lhs; @@ -423,7 +423,7 @@ inline template inline constexpr auto& operator /= (boost::numeric::ublas::tensor_core& lhs, - const boost::numeric::ublas::detail::tensor_expression>,D> &expr) + const boost::numeric::ublas::detail::tensor_expression &expr) { boost::numeric::ublas::detail::eval(lhs, expr(), [](auto& l, auto const& r) { l/=r; } ); return lhs; diff --git a/include/boost/numeric/ublas/tensor/ostream.hpp b/include/boost/numeric/ublas/tensor/ostream.hpp index 15662c0a9..2ce7940cc 100644 --- a/include/boost/numeric/ublas/tensor/ostream.hpp +++ b/include/boost/numeric/ublas/tensor/ostream.hpp @@ -84,9 +84,6 @@ namespace 
boost::numeric::ublas template class tensor_core; -template -struct subtensor_engine; - } //namespace boost::numeric::ublas @@ -120,36 +117,4 @@ std::ostream& operator << (std::ostream& out, class boost::numeric::ublas::tenso return out; } -template -std::ostream& operator << (std::ostream& out, - class boost::numeric::ublas::tensor_core< - boost::numeric::ublas::subtensor_engine> const& t) -{ - - namespace ublas = boost::numeric::ublas; - - auto const& n = t.extents(); - auto const& w = t.span_strides(); - - if(is_scalar(n)){ - out << '['; - ublas::detail::print(out,t[0]); - out << ']'; - } - else if(is_vector(n)) { - const auto& cat = n.at(0) > n.at(1) ? ';' : ','; - out << '['; - for(auto i = 0u; i < t.size()-1; ++i){ - ublas::detail::print(out,t[i]); - out << cat << ' '; - } - ublas::detail::print(out,t[t.size()-1]); - out << ']'; - } - else{ - boost::numeric::ublas::detail::print(out, t.rank()-1, t.data(), w.data(), n.data()); - } - return out; -} - #endif diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 6efac853f..52ddd416e 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -50,6 +50,27 @@ auto to_span_strides(std::vector const& strides, Spans const& spans) return std::vector( span_strides ); } +/*! 
@brief Computes span strides for a subtensor + * + * span stride v is computed according to: v[i] = w[i]*s[i], where + * w[i] is the i-th stride of the tensor + * s[i] is the step size of the i-th span + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto to_span_strides(std::array const& strides, std::array const& spans) +{ + auto span_strides = std::array{}; + + std::transform(strides.begin(), strides.end(), spans.begin(), span_strides.begin(), + [](auto w, auto const& s) { return w * s.step(); } ); + + return std::array( span_strides ); +} + + /*! @brief Computes the data pointer offset for a subtensor * * offset is computed according to: sum ( f[i]*w[i] ), where @@ -69,6 +90,23 @@ auto to_offset(std::vector const& strides, Spans const& spans) std::plus(), [](auto const& s, Size w) {return s.first() * w; } ); } +/*! @brief Computes the data pointer offset for a subtensor + * + * offset is computed according to: sum ( f[i]*w[i] ), where + * f[i] is the first element of the i-th span + * w[i] is the i-th stride of the tensor + * + * @param[in] strides strides of the tensor, the subtensor refers to + * @param[in] spans vector of spans of the subtensor +*/ +template +auto to_offset(std::array const& strides, std::array const& spans) +{ + + return std::inner_product(spans.begin(), spans.end(), strides.begin(), Size(0), + std::plus(), [](auto const& s, Size w) {return s.first() * w; } ); +} + /*! @brief Computes the extents of the subtensor. * @@ -88,6 +126,24 @@ auto to_extents(spans_type const& spans) return extents_t( extents ); } +/*! @brief Computes the extents of the subtensor. 
+ * + * i-th extent is given by span[i].size() + * + * @param[in] spans vector of spans of the subtensor + */ +template +auto to_extents(std::array const& spans) +{ + using extents_t = extents; + using base_type = typename extents_t::base_type; + if(spans.empty()) + return extents_t{}; + auto extents = base_type(); + std::transform(spans.begin(), spans.end(), extents.begin(), [](auto const& s) { return s.size(); } ); + return extents_t( extents ); +} + /*! @brief Auxiliary function for subtensor which possibly transforms a span instance * @@ -145,6 +201,30 @@ void transform_spans_impl (extents<> const& extents, std::array& span_ar } +template +void transform_spans_impl (extents const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); + +template +void transform_spans_impl(extents const& extents, std::array& span_array, span const& s, Spans&& ... spans) +{ + std::get(span_array) = transform_span(s, extents[r]); + static constexpr auto nspans = sizeof...(spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans)...); +} + +template +void transform_spans_impl (extents const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) +{ + static constexpr auto nspans = sizeof...(Spans); + static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); + std::get(span_array) = transform_span(Span(arg), extents[r]); + if constexpr (nspans>0) + transform_spans_impl(extents, span_array, std::forward(spans) ... ); +} + + /*! @brief Auxiliary function for subtensor that generates array of spans * * generate_span_array(shape(4,3,5,2), span(), 1, span(2,end), end ) @@ -177,11 +257,11 @@ auto generate_span_array(extents<> const& extents, Spans&& ... 
spans) * @param[in] extents of the tensor * @param[in] spans spans with which the subtensor is created */ -template +template auto generate_span_array(extents const& extents, Spans&& ... spans) { constexpr static auto n = sizeof...(Spans); - static_assert(N == n); + static_assert(N == n, "Static Error in boost::numeric::ublas::generate_span_vector() when creating subtensor: the number of spans does not match with the tensor rank."); std::array span_array; if constexpr (n>0) transform_spans_impl<0>( extents, span_array, std::forward(spans)... ); @@ -207,8 +287,4 @@ auto generate_span_vector(extents<> const& extents, Spans&& ... spans) } // namespace boost::numeric::ublas::detail - - - - #endif diff --git a/include/boost/numeric/ublas/tensor/tensor.hpp b/include/boost/numeric/ublas/tensor/tensor.hpp index a64307450..13d73c220 100644 --- a/include/boost/numeric/ublas/tensor/tensor.hpp +++ b/include/boost/numeric/ublas/tensor/tensor.hpp @@ -19,5 +19,6 @@ #include "tensor/tensor_static_rank.hpp" #include "tensor/tensor_static.hpp" #include "tensor/subtensor_dynamic.hpp" +#include "tensor/subtensor_static_rank.hpp" #endif // BOOST_UBLAS_TENSOR_TENSOR_HPP diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index 11da1b7d7..c1a3eae9e 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -12,6 +12,8 @@ #ifndef BOOST_UBLAS_SUBTENSOR_DYNAMIC_HPP #define BOOST_UBLAS_SUBTENSOR_DYNAMIC_HPP +#include + #include "../access.hpp" #include "../algorithms.hpp" #include "../concepts.hpp" @@ -26,10 +28,10 @@ #include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "../subtensor_utility.hpp" + #include "subtensor_engine.hpp" #include "tensor_dynamic.hpp" -#include namespace boost::numeric::ublas { @@ -117,13 +119,13 @@ class tensor_core>> : tensor_expression_type{} , 
_spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} - , _strides{} - , _span_strides(detail::to_span_strides(t.strides(), _spans)) + , _strides{detail::to_span_strides(t.strides(), _spans)} + , _span_strides{} , _offset{detail::to_offset(t.strides(), _spans)} , _tensor(t) { _extents = detail::to_extents(_spans); - _strides = ublas::to_strides(_extents,layout_type{}); + _span_strides = ublas::to_strides(_extents,layout_type{}); for (int i = 0; i < (int) _extents.size(); i++) { std::cout << _extents[i] << " "; } @@ -237,7 +239,7 @@ class tensor_core>> "Cannot access tensor with multi-index. " "Number of provided indices does not match with tensor order."); } - const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); return _tensor[idx + _offset]; } @@ -260,7 +262,7 @@ class tensor_core>> "Cannot access tensor with multi-index." "Number of provided indices does not match with tensor order."); } - const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); return _tensor[idx + _offset]; } @@ -303,7 +305,7 @@ class tensor_core>> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); return _tensor[idx]; } @@ -316,7 +318,7 @@ class tensor_core>> [[nodiscard]] inline reference operator[](size_type i) { std::cout << "idx:" << i; - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); std::cout << "->" << idx 
<< std::endl; return _tensor[idx]; } @@ -331,7 +333,7 @@ class tensor_core>> template [[nodiscard]] inline const_reference at(size_type i) const { - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); return _tensor[idx]; } @@ -343,7 +345,7 @@ class tensor_core>> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); return _tensor[idx]; } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index d70e54d9b..4ab157b48 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -12,6 +12,8 @@ #ifndef BOOST_UBLAS_SUBTENSOR_STATIC_RANK_HPP #define BOOST_UBLAS_SUBTENSOR_STATIC_RANK_HPP +#include + #include "../access.hpp" #include "../algorithms.hpp" #include "../concepts.hpp" @@ -30,17 +32,18 @@ #include "subtensor_engine.hpp" #include "tensor_static_rank.hpp" -#include namespace boost::numeric::ublas { -template -class tensor_core>> +template +class tensor_core>>> : public detail::tensor_expression< - tensor_core>>, - tensor_core>>> { + tensor_core>>>, + tensor_core>>>> { public: - using tensor_type = tensor_static_rank; + using tensor_type = tensor_core>; using engine_type = subtensor_engine; using self_type = tensor_core; @@ -117,15 +120,30 @@ class tensor_core>> : tensor_expression_type{} , _spans(detail::generate_span_array(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} - , _strides{} - , 
_span_strides(detail::to_span_strides(t.strides(), _spans)) + , _strides{detail::to_span_strides(t.strides(), _spans)} + , _span_strides{} , _offset{detail::to_offset(t.strides(), _spans)} , _tensor(t) { _extents = detail::to_extents(_spans); - _strides = ublas::to_strides(_extents,layout_type{}); + _span_strides = ublas::to_strides(_extents,layout_type{}); } + // TODO + // template + // tensor_core(const tensor_core& t, FS&& first, SL&&... spans) + // : tensor_expression_type{} + // , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) + // , _extents{} + // , _strides{} + // , _span_strides(detail::to_span_strides(t.strides(), _spans)) + // , _offset{detail::to_offset(t.strides(), _spans)} + // , _tensor(t._tensor) + // { + // _extents = detail::to_extents(_spans); + // _strides = ublas::to_strides(_extents,layout_type{}); + // } + tensor_core(tensor_core&& v) : tensor_expression_type{} , _spans (std::move(v._spans)) @@ -204,7 +222,7 @@ class tensor_core>> [[nodiscard]] inline const_reference at(I1 i1, I2 i2, Is... is) const { static_assert (sizeof...(is)+2 == std::tuple_size_v); - const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); return _tensor[idx + _offset]; } @@ -222,7 +240,7 @@ class tensor_core>> [[nodiscard]] inline reference at(I1 i1, I2 i2, Is... 
is) { static_assert (sizeof...(Is)+2 == std::tuple_size_v); - const auto idx = ublas::detail::to_index(_span_strides, i1, i2, is...); + const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); return _tensor[idx + _offset]; } @@ -265,7 +283,7 @@ class tensor_core>> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); return _tensor[idx]; } @@ -278,7 +296,7 @@ class tensor_core>> [[nodiscard]] inline reference operator[](size_type i) { std::cout << "idx:" << i; - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); std::cout << "->" << idx << std::endl; return _tensor[idx]; } @@ -293,7 +311,7 @@ class tensor_core>> template [[nodiscard]] inline const_reference at(size_type i) const { - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); return _tensor[idx]; } @@ -305,7 +323,7 @@ class tensor_core>> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _span_strides.rbegin(), _span_strides.rend(), _strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); return _tensor[idx]; } @@ -338,7 +356,7 @@ class tensor_core>> [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... 
spans) const noexcept { constexpr auto size = sizeof...(spans)+1; - static_assert(size == this->order()); + static_assert(size == std::tuple_size_v); size_type n = size; auto convert = [&] (auto arg) { return _spans[--n](arg); @@ -350,7 +368,7 @@ class tensor_core>> [[nodiscard]] inline decltype(auto) operator()(span_type&& s, SL&&... spans) noexcept { constexpr auto size = sizeof...(spans)+1; - static_assert(size == this->order()); + static_assert(size == std::tuple_size_v); size_type n = size; auto convert = [&] (auto arg) { return _spans[--n](arg); @@ -374,14 +392,14 @@ class tensor_core>> [[nodiscard]] inline auto empty () const noexcept { return size() == 0; } [[nodiscard]] inline auto size () const noexcept { return ublas::product(_extents);} [[nodiscard]] inline auto size (size_type r) const { return _extents.at(r); } - [[nodiscard]] inline auto rank () const { return return std::tuple_size_v; } + [[nodiscard]] inline auto rank () const { return std::tuple_size_v; } [[nodiscard]] inline auto order () const { return this->rank(); } [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} - [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + detail::to_offset(_tensor.strides(), _spans);} + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + _offset;} + [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + _offset; } // [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp 
b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 63b0bee0d..95e2ec4b3 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -28,6 +28,7 @@ #include "../tags.hpp" #include "../concepts.hpp" #include "../span.hpp" + #include "subtensor_engine.hpp" #include "tensor_engine.hpp" diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index 17d580b0c..a66b2bb7a 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -38,6 +38,9 @@ namespace boost::numeric::ublas { template using engine_tensor_static_rank = tensor_engine, L, std::vector>; +template +class tensor_core>>>; + template class tensor_core> : public detail::tensor_expression< @@ -89,6 +92,9 @@ template using span_type = span; using subtensor_type = tensor_core>; + template + using subtensor_expression_type = detail::tensor_expression; + tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape @@ -163,6 +169,19 @@ template { } + /** @brief Constructs a tensor_core with another tensor_core with a subtensor_engine + * + * @param other tensor_core with a subtensor_engine to be copied. + */ + template + explicit inline tensor_core (const tensor_core> &other) + : tensor_expression_type{} + , _extents (ublas::begin(other.extents ()), ublas::end (other.extents ())) + , _strides (ublas::to_strides(_extents, layout_type{})) + , _container(ublas::product(_extents)) + { + detail::eval(*this, other); + } /** @brief Constructs a tensor_core with an tensor_core expression * @@ -421,13 +440,13 @@ template * @tparam spans */ template - [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) const noexcept { + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... 
spans) const { static_assert(sizeof...(spans)+1 == std::tuple_size_v); return subtensor_type(*this, std::forward(s), std::forward(spans)...); } template - [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) noexcept { + [[nodiscard]] inline decltype(auto) operator() (span_type&& s, SL&& ... spans) { static_assert(sizeof...(spans)+1 == std::tuple_size_v); return subtensor_type(*this, std::forward(s), std::forward(spans)...); } From 813362b1f8def67069707262f234bf0616b44315 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sat, 21 Aug 2021 17:13:35 +0530 Subject: [PATCH 31/40] fix outer product for subtensors --- examples/tensor/access_subtensor.cpp | 20 ++++++++++++----- .../ublas/tensor/function/outer_prod.hpp | 22 +++++++++++++------ 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index a1655ee59..e7e346b17 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -21,11 +21,11 @@ int main() namespace ublas = boost::numeric::ublas; try { - using value = std::complex; + using value = float; using layout = ublas::layout::first_order; // storage format - using tensor = ublas::tensor_static_rank; + using tensor = ublas::tensor_dynamic; using span = ublas::span<>; - constexpr auto ones = ublas::ones_static_rank{}; + constexpr auto ones = ublas::ones{}; // creates a three-dimensional tensor with extents 3,4 and 2 @@ -33,12 +33,12 @@ int main() // to the first-order storage format tensor t1 = ones(3,3,2); - value cnt(0,0); + value cnt(0); for (auto i = 0u; i < t1.size(0); i++) { for (auto j = 0u; j < t1.size(1); j++) { for (auto k = 0u; k < t1.size(2); k++) { t1(i,j,k) = cnt; - cnt+= value(1,1); + cnt+= value(1); } } } @@ -51,12 +51,22 @@ int main() tensor t2 = ones(1,2,2); auto t3 = ublas::inner_prod(A, t2); + tensor p1 = ones(2,2); + tensor sp1 = p1(span(), span()); + tensor p2 = ones(2,2); + tensor sp2 = p2(span(), 
span()); + + sp1(0,1) = sp1(1,1) = 2; + sp2(0,1) = sp2(1,1) = 2; + // // // formatted output // std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "t1=" << t1 << ";" << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; std::cout << "t2=" << t2 << ";" << std::endl << std::endl; std::cout << "t3=" << t3 << ";" << std::endl << std::endl; + std::cout << "prod=" << ublas::outer_prod(sp1, sp2) << ";" << std::endl << std::endl; + } catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); std::cerr << " in the main function of access-tensor." << std::endl; diff --git a/include/boost/numeric/ublas/tensor/function/outer_prod.hpp b/include/boost/numeric/ublas/tensor/function/outer_prod.hpp index 2adb6fef7..29552fdf8 100644 --- a/include/boost/numeric/ublas/tensor/function/outer_prod.hpp +++ b/include/boost/numeric/ublas/tensor/function/outer_prod.hpp @@ -69,15 +69,23 @@ using enable_outer_if_one_extents_has_dynamic_rank = std::enable_if_t< template = true > inline auto outer_prod( tensor_core< TEA > const &a, tensor_core< TEB > const &b) { - using tensorA = tensor_core< TEA >; - using tensorB = tensor_core< TEB >; - using valueA = typename tensorA::value_type; - using extentsA = typename tensorA::extents_type; + using tensorA = tensor_core< TEA >; + using tensorB = tensor_core< TEB >; + + using valueA = typename tensorA::value_type; + using layoutA = typename tensorA::layout_type; + using extentsA = typename tensorA::extents_type; + using containerA = typename tensorA::container_type; + + using valueB = typename tensorB::value_type; + using layoutB = typename tensorB::layout_type; + using extentsB = typename tensorB::extents_type; + using containerB = typename tensorB::container_type; - using valueB = typename tensorB::value_type; - using extentsB = typename tensorB::extents_type; - using tensorC = std::conditional_t < is_dynamic_rank_v, tensorA, tensorB>; + using tensorC = 
std::conditional_t < is_dynamic_rank_v, + tensor_core>, + tensor_core>>; // using valueC = typename tensorC::value_type; using extentsC = typename tensorC::extents_type; From b515bb4e6669cf3f538d617888cc4805d34f07b2 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sat, 21 Aug 2021 17:17:43 +0530 Subject: [PATCH 32/40] fix failing tests --- test/tensor/test_access.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index f51838d0e..dc28a31e1 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -188,7 +188,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index, layout_t, layout_t auto const& jref = std::get(index); mp::mp_for_each>( [&]( auto K ) { auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,w.begin(), 0); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() ,w.begin()); BOOST_CHECK(j < prodn(n)); BOOST_CHECK_EQUAL(j,jref[K]); }); @@ -212,7 +212,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_static_rank, layout_ constexpr auto r = std::get(ranks); mp::mp_for_each>( [&]( auto K ) { auto const& ii = std::get(i); - auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin(), 0); + auto const j = ub::detail::compute_single_index(ii.begin(), ii.end() , w.begin()); BOOST_CHECK(j < prodn(n)); BOOST_CHECK_EQUAL(j,jref[K]); }); From b96fe1bb0f4b845964d87f4789aee7b8c671a9d7 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sun, 22 Aug 2021 16:42:29 +0530 Subject: [PATCH 33/40] WIP: Add tests for subtensor --- examples/tensor/access_subtensor.cpp | 11 +- include/boost/numeric/ublas/tensor/span.hpp | 7 +- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 36 +- .../tensor/tensor/subtensor_static_rank.hpp | 63 +- .../ublas/tensor/tensor/tensor_dynamic.hpp | 19 - .../tensor/tensor/tensor_static_rank.hpp | 3 - .../ublas/tensor/traits/read_write_traits.hpp | 
30 - test/tensor/Jamfile | 6 + test/tensor/test_span.cpp | 243 ++++---- .../test_subtensor_expression_evaluation.cpp | 287 +++++++++ test/tensor/test_subtensor_extents.cpp | 555 ++++++++++++++++++ test/tensor/test_subtensor_matrix_vector.cpp | 472 +++++++++++++++ .../test_subtensor_operators_arithmetic.cpp | 238 ++++++++ .../test_subtensor_operators_comparison.cpp | 197 +++++++ 14 files changed, 1899 insertions(+), 268 deletions(-) delete mode 100644 include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp create mode 100644 test/tensor/test_subtensor_expression_evaluation.cpp create mode 100644 test/tensor/test_subtensor_extents.cpp create mode 100644 test/tensor/test_subtensor_matrix_vector.cpp create mode 100644 test/tensor/test_subtensor_operators_arithmetic.cpp create mode 100644 test/tensor/test_subtensor_operators_comparison.cpp diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index e7e346b17..fbe994175 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -44,6 +44,7 @@ int main() } auto A = t1(span(1), span(0,2,2), span()); auto B = A(span(), span(), span()); + A += B; std::cout << "% --------------------------- " << std::endl; for (auto x: B.extents().base()) { std::cout << x << " "; @@ -51,22 +52,12 @@ int main() tensor t2 = ones(1,2,2); auto t3 = ublas::inner_prod(A, t2); - tensor p1 = ones(2,2); - tensor sp1 = p1(span(), span()); - tensor p2 = ones(2,2); - tensor sp2 = p2(span(), span()); - - sp1(0,1) = sp1(1,1) = 2; - sp2(0,1) = sp2(1,1) = 2; - // // // formatted output // std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "t1=" << t1 << ";" << std::endl << std::endl; std::cout << "A=" << A << ";" << std::endl << std::endl; std::cout << "t2=" << t2 << ";" << std::endl << std::endl; std::cout << "t3=" << t3 << ";" << std::endl << std::endl; - std::cout << "prod=" << ublas::outer_prod(sp1, sp2) << ";" << std::endl << std::endl; - } 
catch (const std::exception& e) { std::cerr << "Cought exception " << e.what(); std::cerr << " in the main function of access-tensor." << std::endl; diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 4b195f068..2962fb04c 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -1,4 +1,5 @@ -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2018-2021, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -35,7 +36,7 @@ namespace boost::numeric::ublas { * */ -static constexpr inline std::size_t max = std::numeric_limits::max(); +static constexpr inline std::size_t max = std::numeric_limits::max() - 1; template class span @@ -115,7 +116,7 @@ class span return span( rhs.first_*lhs.step_ + lhs.first_, lhs.step_ *rhs.step_, - rhs.last_ *lhs.step_ + lhs.first_ ); + std::min(rhs.last_,size()) *lhs.step_ + lhs.first_ ); } protected: diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index c1a3eae9e..a7b1c6c85 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -25,7 +25,6 @@ #include "../layout.hpp" #include "../span.hpp" #include "../tags.hpp" -#include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "../subtensor_utility.hpp" @@ -38,22 +37,18 @@ namespace boost::numeric::ublas { template class tensor_core>> : public detail::tensor_expression< - tensor_core>>, - tensor_core>> - > { + tensor_dynamic, tensor_dynamic> { public: using tensor_type = tensor_dynamic; using engine_type = subtensor_engine; using self_type = tensor_core; template - using 
tensor_expression_type = detail::tensor_expression; + using tensor_expression_type = detail::tensor_expression; template using matrix_expression_type = matrix_expression; template using vector_expression_type = vector_expression; - template - using parent_tensor_expression_type = detail::tensor_expression; // template struct subtensor_iterator { // }; @@ -104,7 +99,7 @@ class tensor_core>> tensor_core(const tensor_core&) = default; tensor_core(tensor_type& t) - : tensor_expression_type{} + : tensor_expression_type{} , _spans() , _extents(t.extents()) , _strides(t.strides()) @@ -116,7 +111,7 @@ class tensor_core>> template tensor_core(U&& t, FS&& first, SL&&... spans) - : tensor_expression_type{} + : tensor_expression_type{} , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} , _strides{detail::to_span_strides(t.strides(), _spans)} @@ -144,7 +139,7 @@ class tensor_core>> // TODO // template // tensor_core(const tensor_core& t, FS&& first, SL&&... spans) - // : tensor_expression_type{} + // : tensor_expression_type{} // , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) // , _extents{} // , _strides{} @@ -157,7 +152,7 @@ class tensor_core>> // } tensor_core(tensor_core&& v) - : tensor_expression_type{} + : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) @@ -188,23 +183,6 @@ class tensor_core>> return *this; } - /** @brief Evaluates the tensor_expression and assigns the results to the - * tensor_core - * - * @code A = B + C * 2; @endcode - * - * @note rank and dimension extents of the tensors in the expressions must - * conform with this tensor_core. - * - * @param expr expression that is evaluated. 
- */ - template - tensor_core& operator=(const parent_tensor_expression_type& expr) - { - detail::eval(*this, expr); - return *this; - } - // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { @@ -235,7 +213,7 @@ class tensor_core>> { if (sizeof...(is) + 2 != this->order()) { throw std::invalid_argument( - "boost::numeric::ublas::tensor_core::at : " + "boost::numeric::ublas::tensor_core>::at : " "Cannot access tensor with multi-index. " "Number of provided indices does not match with tensor order."); } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index 4ab157b48..1a4b882f5 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -25,7 +25,6 @@ #include "../layout.hpp" #include "../span.hpp" #include "../tags.hpp" -#include "../traits/read_write_traits.hpp" #include "../type_traits.hpp" #include "../subtensor_utility.hpp" @@ -38,23 +37,19 @@ namespace boost::numeric::ublas { template class tensor_core>>> : public detail::tensor_expression< - tensor_core>>>, - tensor_core>>>> { + tensor_core>, + tensor_core>> { public: using tensor_type = tensor_core>; using engine_type = subtensor_engine; using self_type = tensor_core; template - using tensor_expression_type = detail::tensor_expression; + using tensor_expression_type = detail::tensor_expression; template using matrix_expression_type = matrix_expression; template using vector_expression_type = vector_expression; - template - using parent_tensor_expression_type = detail::tensor_expression; // template struct subtensor_iterator { // }; @@ -72,15 +67,15 @@ class tensor_core> using difference_type = typename container_traits_type::difference_type; using value_type = typename container_traits_type::value_type; - using reference 
= std::conditional_t; + using reference = std::conditional_t; using const_reference = typename container_traits_type::const_reference; - using pointer = std::conditional_t; - using const_pointer = typename container_traits_type::const_pointer; + using pointer = std::conditional_t; + using const_pointer = typename container_traits_type::const_pointer; // using iterator = typename self_type::subtensor_iterator; // using const_iterator = @@ -90,22 +85,21 @@ class tensor_core> // using const_reverse_iterator = // typename container_traits_type::const_reverse_iterator; - using matrix_type = matrix >; - using vector_type = vector >; - - using container_tag = typename container_traits_type::container_tag; - using resizable_tag = typename container_traits_type::resizable_tag; + using matrix_type = matrix >; + using vector_type = vector >; - using span_type = span; + using container_tag = typename container_traits_type::container_tag; + using resizable_tag = typename container_traits_type::resizable_tag; - using subtensor_type = self_type; + using span_type = span; + using subtensor_type = self_type; explicit tensor_core() = delete; tensor_core(const tensor_core&) = default; tensor_core(tensor_type& t) - : tensor_expression_type{} + : tensor_expression_type{} , _spans() , _extents(t.extents()) , _strides(t.strides()) @@ -117,7 +111,7 @@ class tensor_core> template tensor_core(U&& t, FS&& first, SL&&... spans) - : tensor_expression_type{} + : tensor_expression_type{} , _spans(detail::generate_span_array(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} , _strides{detail::to_span_strides(t.strides(), _spans)} @@ -132,7 +126,7 @@ class tensor_core> // TODO // template // tensor_core(const tensor_core& t, FS&& first, SL&&... 
spans) - // : tensor_expression_type{} + // : tensor_expression_type{} // , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) // , _extents{} // , _strides{} @@ -145,7 +139,7 @@ class tensor_core> // } tensor_core(tensor_core&& v) - : tensor_expression_type{} + : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) @@ -176,23 +170,6 @@ class tensor_core> return *this; } - /** @brief Evaluates the tensor_expression and assigns the results to the - * tensor_core - * - * @code A = B + C * 2; @endcode - * - * @note rank and dimension extents of the tensors in the expressions must - * conform with this tensor_core. - * - * @param expr expression that is evaluated. - */ - template - tensor_core& operator=(const parent_tensor_expression_type& expr) - { - detail::eval(*this, expr); - return *this; - } - // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 95e2ec4b3..356da1246 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -90,10 +90,6 @@ template using subtensor_type = tensor_core>; - template - using subtensor_expression_type = detail::tensor_expression; - - explicit tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape @@ -300,21 +296,6 @@ template return *this; } - /** @brief Evaluates the tensor_expression and assigns the results to the tensor_core - * - * @code A = B + C * 2; @endcode - * - * @note rank and dimension extents of the tensors in the expressions must conform with this tensor_core. - * - * @param expr expression that is evaluated. 
- */ - template - tensor_core &operator = (const subtensor_expression_type &expr) - { - detail::eval(*this, expr); - return *this; - } - // NOLINTNEXTLINE(cppcoreguidelines-special-member-functions,hicpp-special-member-functions) tensor_core& operator=(tensor_core other) noexcept { diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp index a66b2bb7a..2719e279b 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_static_rank.hpp @@ -92,9 +92,6 @@ template using span_type = span; using subtensor_type = tensor_core>; - template - using subtensor_expression_type = detail::tensor_expression; - tensor_core () = default; /** @brief Constructs a tensor_core with a \c shape diff --git a/include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp b/include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp deleted file mode 100644 index a9a456eed..000000000 --- a/include/boost/numeric/ublas/tensor/traits/read_write_traits.hpp +++ /dev/null @@ -1,30 +0,0 @@ -// -// Copyright (c) 2020, Kannav Mehta, kmkannavkmehta@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// - -#ifndef BOOST_UBLAS_TENSOR_READ_WRITE_TYPE_TRAITS_HPP -#define BOOST_UBLAS_TENSOR_READ_WRITE_TYPE_TRAITS_HPP - -#include -#include - -#include "../tags.hpp" - -namespace boost::numeric::ublas::detail { - -template -struct is_read_write : std::false_type {}; - -template<> -struct is_read_write : std::true_type {}; - -template -inline static constexpr bool is_read_write_v = is_read_write::value; - -} // namespace boost::numeric::ublas - -#endif diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index a2446beb9..99f1ff47f 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -54,6 +54,7 @@ test-suite boost-ublas-tensor-test test_multiplication.cpp test_operators_arithmetic.cpp test_operators_comparison.cpp + # test_span.cpp test_static_expression_evaluation.cpp test_static_extents.cpp test_static_operators_arithmetic.cpp @@ -63,6 +64,11 @@ test-suite boost-ublas-tensor-test test_static_tensor_matrix_vector.cpp test_strides.cpp # test_subtensor.cpp + # test_subtensor_expression_evaluation.cpp + # test_subtensor_extents.cpp + # test_subtensor_matrix_vector.cpp + # test_subtensor_operators_arithmetic.cpp + # test_subtensor_operators_comparison.cpp test_subtensor_utility.cpp test_tensor.cpp test_tensor_matrix_vector.cpp diff --git a/test/tensor/test_span.cpp b/test/tensor/test_span.cpp index 9df47d378..4a66cb030 100644 --- a/test/tensor/test_span.cpp +++ b/test/tensor/test_span.cpp @@ -17,19 +17,21 @@ BOOST_AUTO_TEST_SUITE( span_testsuite ); struct fixture { - using span_type = boost::numeric::ublas::pan; + using span_type = boost::numeric::ublas::span<>; fixture() : spans { span_type{}, // 0 - span_type(0,0,0), // 1 - span_type(0,1,0), // 2 - span_type(0,1,2), // 3 - span_type(1,1,2), // 4 - span_type(0,2,4), // 5 - span_type(1,2,4), // 6 - span_type(1,3,5), // 7 - span_type(1,3,7) // 8 + span_type(0,4), // 1 + span_type(2,6), // 2 + span_type(0,0,0), // 3 + 
span_type(0,1,0), // 4 + span_type(0,1,2), // 5 + span_type(1,1,2), // 6 + span_type(0,2,4), // 7 + span_type(1,2,4), // 8 + span_type(1,3,5), // 9 + span_type(1,3,7) // 10 } {} std::vector spans; @@ -42,49 +44,58 @@ BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) using span_type = boost::numeric::ublas::span<>; BOOST_CHECK_EQUAL (spans[0].first(),0); - BOOST_CHECK_EQUAL (spans[0].step (),0); - BOOST_CHECK_EQUAL (spans[0].last (),0); - BOOST_CHECK_EQUAL (spans[0].size (),0); + BOOST_CHECK_EQUAL (spans[0].step (),1); + BOOST_CHECK_EQUAL (spans[0].last (),boost::numeric::ublas::max); BOOST_CHECK_EQUAL (spans[1].first(),0); - BOOST_CHECK_EQUAL (spans[1].step (),0); - BOOST_CHECK_EQUAL (spans[1].last (),0); - BOOST_CHECK_EQUAL (spans[1].size (),1); + BOOST_CHECK_EQUAL (spans[1].step (),1); + BOOST_CHECK_EQUAL (spans[1].last (),4); + BOOST_CHECK_EQUAL (spans[1].size (),4); - BOOST_CHECK_EQUAL (spans[2].first(),0); + BOOST_CHECK_EQUAL (spans[2].first(),2); BOOST_CHECK_EQUAL (spans[2].step (),1); - BOOST_CHECK_EQUAL (spans[2].last (),0); - BOOST_CHECK_EQUAL (spans[2].size (),1); + BOOST_CHECK_EQUAL (spans[2].last (),6); + BOOST_CHECK_EQUAL (spans[2].size (),5); BOOST_CHECK_EQUAL (spans[3].first(),0); - BOOST_CHECK_EQUAL (spans[3].step (),1); - BOOST_CHECK_EQUAL (spans[3].last (),2); - BOOST_CHECK_EQUAL (spans[3].size (),3); + BOOST_CHECK_EQUAL (spans[3].step (),0); + BOOST_CHECK_EQUAL (spans[3].last (),0); + BOOST_CHECK_EQUAL (spans[3].size (),1); - BOOST_CHECK_EQUAL (spans[4].first(),1); + BOOST_CHECK_EQUAL (spans[4].first(),0); BOOST_CHECK_EQUAL (spans[4].step (),1); - BOOST_CHECK_EQUAL (spans[4].last (),2); - BOOST_CHECK_EQUAL (spans[4].size (),2); + BOOST_CHECK_EQUAL (spans[4].last (),0); + BOOST_CHECK_EQUAL (spans[4].size (),1); BOOST_CHECK_EQUAL (spans[5].first(),0); - BOOST_CHECK_EQUAL (spans[5].step (),2); - BOOST_CHECK_EQUAL (spans[5].last (),4); + BOOST_CHECK_EQUAL (spans[5].step (),1); + BOOST_CHECK_EQUAL (spans[5].last (),2); BOOST_CHECK_EQUAL (spans[5].size 
(),3); BOOST_CHECK_EQUAL (spans[6].first(),1); - BOOST_CHECK_EQUAL (spans[6].step (),2); - BOOST_CHECK_EQUAL (spans[6].last (),3); + BOOST_CHECK_EQUAL (spans[6].step (),1); + BOOST_CHECK_EQUAL (spans[6].last (),2); BOOST_CHECK_EQUAL (spans[6].size (),2); - BOOST_CHECK_EQUAL (spans[7].first(),1); - BOOST_CHECK_EQUAL (spans[7].step (),3); + BOOST_CHECK_EQUAL (spans[7].first(),0); + BOOST_CHECK_EQUAL (spans[7].step (),2); BOOST_CHECK_EQUAL (spans[7].last (),4); - BOOST_CHECK_EQUAL (spans[7].size (),2); + BOOST_CHECK_EQUAL (spans[7].size (),3); BOOST_CHECK_EQUAL (spans[8].first(),1); - BOOST_CHECK_EQUAL (spans[8].step (),3); - BOOST_CHECK_EQUAL (spans[8].last (),7); - BOOST_CHECK_EQUAL (spans[8].size (),3); + BOOST_CHECK_EQUAL (spans[8].step (),2); + BOOST_CHECK_EQUAL (spans[8].last (),3); + BOOST_CHECK_EQUAL (spans[8].size (),2); + + BOOST_CHECK_EQUAL (spans[9].first(),1); + BOOST_CHECK_EQUAL (spans[9].step (),3); + BOOST_CHECK_EQUAL (spans[9].last (),4); + BOOST_CHECK_EQUAL (spans[9].size (),2); + + BOOST_CHECK_EQUAL (spans[10].first(),1); + BOOST_CHECK_EQUAL (spans[10].step (),3); + BOOST_CHECK_EQUAL (spans[10].last (),7); + BOOST_CHECK_EQUAL (spans[10].size (),3); BOOST_CHECK_THROW ( span_type( 1,0,3 ), std::runtime_error ); @@ -100,51 +111,58 @@ BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) BOOST_CHECK_EQUAL (span_type(spans[0]).first(),0); - BOOST_CHECK_EQUAL (span_type(spans[0]).step (),0); - BOOST_CHECK_EQUAL (span_type(spans[0]).last (),0); - BOOST_CHECK_EQUAL (span_type(spans[0]).size (),0); + BOOST_CHECK_EQUAL (span_type(spans[0]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[0]).last (),boost::numeric::ublas::max); BOOST_CHECK_EQUAL (span_type(spans[1]).first(),0); - BOOST_CHECK_EQUAL (span_type(spans[1]).step (),0); - BOOST_CHECK_EQUAL (span_type(spans[1]).last (),0); - BOOST_CHECK_EQUAL (span_type(spans[1]).size (),1); + BOOST_CHECK_EQUAL (span_type(spans[1]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[1]).last (),4); + BOOST_CHECK_EQUAL 
(span_type(spans[1]).size (),4); - BOOST_CHECK_EQUAL (span_type(spans[2]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[2]).first(),2); BOOST_CHECK_EQUAL (span_type(spans[2]).step (),1); - BOOST_CHECK_EQUAL (span_type(spans[2]).last (),0); - BOOST_CHECK_EQUAL (span_type(spans[2]).size (),1); + BOOST_CHECK_EQUAL (span_type(spans[2]).last (),6); + BOOST_CHECK_EQUAL (span_type(spans[2]).size (),5); BOOST_CHECK_EQUAL (span_type(spans[3]).first(),0); - BOOST_CHECK_EQUAL (span_type(spans[3]).step (),1); - BOOST_CHECK_EQUAL (span_type(spans[3]).last (),2); - BOOST_CHECK_EQUAL (span_type(spans[3]).size (),3); + BOOST_CHECK_EQUAL (span_type(spans[3]).step (),0); + BOOST_CHECK_EQUAL (span_type(spans[3]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[3]).size (),1); - BOOST_CHECK_EQUAL (span_type(spans[4]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[4]).first(),0); BOOST_CHECK_EQUAL (span_type(spans[4]).step (),1); - BOOST_CHECK_EQUAL (span_type(spans[4]).last (),2); - BOOST_CHECK_EQUAL (span_type(spans[4]).size (),2); - + BOOST_CHECK_EQUAL (span_type(spans[4]).last (),0); + BOOST_CHECK_EQUAL (span_type(spans[4]).size (),1); BOOST_CHECK_EQUAL (span_type(spans[5]).first(),0); - BOOST_CHECK_EQUAL (span_type(spans[5]).step (),2); - BOOST_CHECK_EQUAL (span_type(spans[5]).last (),4); + BOOST_CHECK_EQUAL (span_type(spans[5]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[5]).last (),2); BOOST_CHECK_EQUAL (span_type(spans[5]).size (),3); BOOST_CHECK_EQUAL (span_type(spans[6]).first(),1); - BOOST_CHECK_EQUAL (span_type(spans[6]).step (),2); - BOOST_CHECK_EQUAL (span_type(spans[6]).last (),3); + BOOST_CHECK_EQUAL (span_type(spans[6]).step (),1); + BOOST_CHECK_EQUAL (span_type(spans[6]).last (),2); BOOST_CHECK_EQUAL (span_type(spans[6]).size (),2); - BOOST_CHECK_EQUAL (span_type(spans[7]).first(),1); - BOOST_CHECK_EQUAL (span_type(spans[7]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[7]).first(),0); + BOOST_CHECK_EQUAL (span_type(spans[7]).step (),2); BOOST_CHECK_EQUAL 
(span_type(spans[7]).last (),4); - BOOST_CHECK_EQUAL (span_type(spans[7]).size (),2); + BOOST_CHECK_EQUAL (span_type(spans[7]).size (),3); BOOST_CHECK_EQUAL (span_type(spans[8]).first(),1); - BOOST_CHECK_EQUAL (span_type(spans[8]).step (),3); - BOOST_CHECK_EQUAL (span_type(spans[8]).last (),7); - BOOST_CHECK_EQUAL (span_type(spans[8]).size (),3); + BOOST_CHECK_EQUAL (span_type(spans[8]).step (),2); + BOOST_CHECK_EQUAL (span_type(spans[8]).last (),3); + BOOST_CHECK_EQUAL (span_type(spans[8]).size (),2); + BOOST_CHECK_EQUAL (span_type(spans[9]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[9]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[9]).last (),4); + BOOST_CHECK_EQUAL (span_type(spans[9]).size (),2); + + BOOST_CHECK_EQUAL (span_type(spans[10]).first(),1); + BOOST_CHECK_EQUAL (span_type(spans[10]).step (),3); + BOOST_CHECK_EQUAL (span_type(spans[10]).last (),7); + BOOST_CHECK_EQUAL (span_type(spans[10]).size (),3); } @@ -153,106 +171,69 @@ BOOST_FIXTURE_TEST_CASE( assignment_operator_test, fixture ) { auto c0 = spans[1]; BOOST_CHECK_EQUAL ((c0=spans[0]).first(),0); - BOOST_CHECK_EQUAL ((c0=spans[0]).step (),0); - BOOST_CHECK_EQUAL ((c0=spans[0]).last (),0); - BOOST_CHECK_EQUAL ((c0=spans[0]).size (),0); + BOOST_CHECK_EQUAL ((c0=spans[0]).step (),1); + BOOST_CHECK_EQUAL ((c0=spans[0]).last (),boost::numeric::ublas::max); auto c1 = spans[2]; BOOST_CHECK_EQUAL ((c1=spans[1]).first(),0); - BOOST_CHECK_EQUAL ((c1=spans[1]).step (),0); - BOOST_CHECK_EQUAL ((c1=spans[1]).last (),0); - BOOST_CHECK_EQUAL ((c1=spans[1]).size (),1); + BOOST_CHECK_EQUAL ((c1=spans[1]).step (),1); + BOOST_CHECK_EQUAL ((c1=spans[1]).last (),4); + BOOST_CHECK_EQUAL ((c1=spans[1]).size (),4); auto c2 = spans[3]; - BOOST_CHECK_EQUAL ((c2=spans[2]).first(),0); + BOOST_CHECK_EQUAL ((c2=spans[2]).first(),2); BOOST_CHECK_EQUAL ((c2=spans[2]).step (),1); - BOOST_CHECK_EQUAL ((c2=spans[2]).last (),0); - BOOST_CHECK_EQUAL ((c2=spans[2]).size (),1); + BOOST_CHECK_EQUAL ((c2=spans[2]).last (),6); + 
BOOST_CHECK_EQUAL ((c2=spans[2]).size (),5); auto c3 = spans[4]; BOOST_CHECK_EQUAL ((c3=spans[3]).first(),0); - BOOST_CHECK_EQUAL ((c3=spans[3]).step (),1); - BOOST_CHECK_EQUAL ((c3=spans[3]).last (),2); - BOOST_CHECK_EQUAL ((c3=spans[3]).size (),3); + BOOST_CHECK_EQUAL ((c3=spans[3]).step (),0); + BOOST_CHECK_EQUAL ((c3=spans[3]).last (),0); + BOOST_CHECK_EQUAL ((c3=spans[3]).size (),1); auto c4 = spans[5]; - BOOST_CHECK_EQUAL ((c4=spans[4]).first(),1); + BOOST_CHECK_EQUAL ((c4=spans[4]).first(),0); BOOST_CHECK_EQUAL ((c4=spans[4]).step (),1); - BOOST_CHECK_EQUAL ((c4=spans[4]).last (),2); - BOOST_CHECK_EQUAL ((c4=spans[4]).size (),2); + BOOST_CHECK_EQUAL ((c4=spans[4]).last (),0); + BOOST_CHECK_EQUAL ((c4=spans[4]).size (),1); auto c5 = spans[6]; BOOST_CHECK_EQUAL ((c5=spans[5]).first(),0); - BOOST_CHECK_EQUAL ((c5=spans[5]).step (),2); - BOOST_CHECK_EQUAL ((c5=spans[5]).last (),4); + BOOST_CHECK_EQUAL ((c5=spans[5]).step (),1); + BOOST_CHECK_EQUAL ((c5=spans[5]).last (),2); BOOST_CHECK_EQUAL ((c5=spans[5]).size (),3); auto c6 = spans[7]; BOOST_CHECK_EQUAL ((c6=spans[6]).first(),1); - BOOST_CHECK_EQUAL ((c6=spans[6]).step (),2); - BOOST_CHECK_EQUAL ((c6=spans[6]).last (),3); + BOOST_CHECK_EQUAL ((c6=spans[6]).step (),1); + BOOST_CHECK_EQUAL ((c6=spans[6]).last (),2); BOOST_CHECK_EQUAL ((c6=spans[6]).size (),2); auto c7 = spans[8]; - BOOST_CHECK_EQUAL ((c7=spans[7]).first(),1); - BOOST_CHECK_EQUAL ((c7=spans[7]).step (),3); + BOOST_CHECK_EQUAL ((c7=spans[7]).first(),0); + BOOST_CHECK_EQUAL ((c7=spans[7]).step (),2); BOOST_CHECK_EQUAL ((c7=spans[7]).last (),4); - BOOST_CHECK_EQUAL ((c7=spans[7]).size (),2); - -} - -BOOST_FIXTURE_TEST_CASE( access_operator_test, fixture ) -{ - - BOOST_CHECK_EQUAL(spans[0][0], 0); - - BOOST_CHECK_EQUAL(spans[1][0], 0); - - BOOST_CHECK_EQUAL(spans[2][0], 0); + BOOST_CHECK_EQUAL ((c7=spans[7]).size (),3); - BOOST_CHECK_EQUAL(spans[3][0], 0); - BOOST_CHECK_EQUAL(spans[3][1], 1); - BOOST_CHECK_EQUAL(spans[3][2], 2); + auto c8 = spans[9]; 
+ BOOST_CHECK_EQUAL ((c8=spans[8]).first(),1); + BOOST_CHECK_EQUAL ((c8=spans[8]).step (),2); + BOOST_CHECK_EQUAL ((c8=spans[8]).last (),3); + BOOST_CHECK_EQUAL ((c8=spans[8]).size (),2); - BOOST_CHECK_EQUAL(spans[4][0], 1); - BOOST_CHECK_EQUAL(spans[4][1], 2); - - BOOST_CHECK_EQUAL(spans[5][0], 0); - BOOST_CHECK_EQUAL(spans[5][1], 2); - BOOST_CHECK_EQUAL(spans[5][2], 4); - - BOOST_CHECK_EQUAL(spans[6][0], 1); - BOOST_CHECK_EQUAL(spans[6][1], 3); - - BOOST_CHECK_EQUAL(spans[7][0], 1); - BOOST_CHECK_EQUAL(spans[7][1], 4); - - BOOST_CHECK_EQUAL(spans[8][0], 1); - BOOST_CHECK_EQUAL(spans[8][1], 4); - BOOST_CHECK_EQUAL(spans[8][2], 7); + auto c9 = spans[10]; + BOOST_CHECK_EQUAL ((c9=spans[9]).first(),1); + BOOST_CHECK_EQUAL ((c9=spans[9]).step (),3); + BOOST_CHECK_EQUAL ((c9=spans[9]).last (),4); + BOOST_CHECK_EQUAL ((c9=spans[9]).size (),2); } -BOOST_FIXTURE_TEST_CASE( ran_test, fixture ) +BOOST_FIXTURE_TEST_CASE( function_operator_test, fixture ) { - using namespace boost::numeric::ublas; - - BOOST_CHECK ( ( ran(0,0,0) == spans[0]) ); - - BOOST_CHECK ( ( ran(0,1,0) == spans[2]) ); - BOOST_CHECK ( ( ran(0, 0) == spans[2]) ); - - - BOOST_CHECK ( ( ran(0,1,2) == spans[3]) ); - BOOST_CHECK ( ( ran(0, 2) == spans[3]) ); - - BOOST_CHECK ( ( ran(1,1,2) == spans[4]) ); - BOOST_CHECK ( ( ran(1, 2) == spans[4]) ); - - BOOST_CHECK ( ( ran(0,2,4) == spans[5]) ); - BOOST_CHECK ( ( ran(1,2,4) == spans[6]) ); - BOOST_CHECK ( ( ran(1,3,5) == spans[7]) ); - BOOST_CHECK ( ( ran(1,3,7) == spans[8]) ); + } + BOOST_AUTO_TEST_SUITE_END(); diff --git a/test/tensor/test_subtensor_expression_evaluation.cpp b/test/tensor/test_subtensor_expression_evaluation.cpp new file mode 100644 index 000000000..a1e06885b --- /dev/null +++ b/test/tensor/test_subtensor_expression_evaluation.cpp @@ -0,0 +1,287 @@ +// +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + + +#include +#include +#include + +#include "utility.hpp" + +#include +#include +#include + +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_expression) + +using test_types = zip>::with_t; + + + + +struct fixture +{ + template + using extents_t = boost::numeric::ublas::extents; + + static constexpr auto extents = + std::make_tuple( +// extents_t<0> {}, + extents_t<2> {1,1}, + extents_t<2> {1,2}, + extents_t<2> {2,1}, + extents_t<2> {2,3}, + extents_t<3> {2,3,1}, + extents_t<3> {4,1,3}, + extents_t<3> {1,2,3}, + extents_t<3> {4,2,3}, + extents_t<4>{4,2,3,5} ); +}; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_extents, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; + + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ + + + + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + + + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } + + + BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); + + // uexpr1 = t+1 + // uexpr2 = 2+t + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); + + // bexpr_uexpr = (t+1) + (2+t) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, 
bplus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); + + + // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); + + }); + + for_each_in_tuple(extents, [&](auto I, auto const& e1){ + + + if ( I >= std::tuple_size_v - 1 ){ + return; + } + + constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; + + for_each_in_tuple(extents, [&,I](auto J, auto const& e2){ + + if( J != I + 1 ){ + return; + } + + static constexpr auto size1 = std::tuple_size_v>; + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; + + auto v = value_t{}; + + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } + + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } + + BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); + + // uexpr1 = t1+1 + // uexpr2 = 2+t2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + + BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); + + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+t2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); + + + // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + + BOOST_CHECK( 
ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); + + + // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); + } + + }); + }); +} + + + + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents_equal, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; + + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + + + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } + + + BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); + + + // uexpr1 = t+1 + // uexpr2 = 2+t + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); + + // bexpr_uexpr = (t+1) + (2+t) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); + + + // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); + + }); + + + for_each_in_tuple(extents, [&](auto I, auto& e1){ + + if ( I >= std::tuple_size_v - 
1){ + return; + } + + static constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; + + for_each_in_tuple(extents, [&](auto J, auto& e2){ + + if( J != I + 1 ){ + return; + } + + + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; + + auto v = value_t{}; + + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } + + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } + + BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); + + // uexpr1 = t1+1 + // uexpr2 = 2+t2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); + + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+t2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); + + // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); + + // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + + BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); + + + // bexpr_uexpr2 = (t1+1) + t2 + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); + + + // bexpr_uexpr2 = ((t1+1) + t2) + t1 + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); + + // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); + } + + }); + }); + +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_extents.cpp b/test/tensor/test_subtensor_extents.cpp new file mode 100644 index 000000000..ac873f55c --- /dev/null +++ b/test/tensor/test_subtensor_extents.cpp @@ -0,0 +1,555 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + +#include +#include +#include + +BOOST_AUTO_TEST_SUITE ( test_extents_static_size ) + + +//*boost::unit_test::label("extents") +//*boost::unit_test::label("constructor") + +BOOST_AUTO_TEST_CASE(test_extents_static_size_ctor) +{ + namespace ub = boost::numeric::ublas; + + +// auto e = ub::extents<0>{}; + auto e11 = ub::extents<2>{1,1}; + auto e12 = ub::extents<2>{1,2}; + auto e21 = ub::extents<2>{2,1}; + auto e23 = ub::extents<2>{2,3}; + auto e231 = ub::extents<3>{2,3,1}; + auto e123 = ub::extents<3>{1,2,3}; // 6 + auto e423 = ub::extents<3>{4,2,3}; // 7 + + + BOOST_CHECK (!ub::empty(e11)); + BOOST_CHECK (!ub::empty(e12)); + BOOST_CHECK (!ub::empty(e21)); + BOOST_CHECK (!ub::empty(e23)); + BOOST_CHECK (!ub::empty(e231)); + BOOST_CHECK (!ub::empty(e123)); + BOOST_CHECK (!ub::empty(e423)); + + BOOST_CHECK ( ub::size (e11) == 2); + BOOST_CHECK ( ub::size (e12) == 2); + BOOST_CHECK ( ub::size (e21) == 2); + BOOST_CHECK ( ub::size (e23) == 2); + BOOST_CHECK ( ub::size(e231) == 3); + BOOST_CHECK ( ub::size(e123) == 3); + BOOST_CHECK ( ub::size(e423) == 3); + + + BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<1>({0} ), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<2>({0,1}), std::invalid_argument); + BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::length_error); +} + + +struct fixture { + template + using extents = boost::numeric::ublas::extents; + +// extents<0> de {}; + + extents<2> de11 {1,1}; + extents<2> de12 {1,2}; + extents<2> de21 {2,1}; + + extents<2> de23 {2,3}; + extents<3> de231 {2,3,1}; + extents<3> de123 {1,2,3}; + extents<4> de1123 {1,1,2,3}; + extents<5> de12311 {1,2,3,1,1}; + + extents<3> de423 {4,2,3}; + extents<4> de4213 {4,2,1,3}; + extents<5> de42131 {4,2,1,3,1}; + extents<6> 
de142131 {1,4,2,1,3,1}; + + extents<3> de141 {1,4,1}; + extents<4> de1111 {1,1,1,1}; + extents<5> de14111 {1,4,1,1,1}; + extents<6> de112111 {1,1,2,1,1,1}; + extents<6> de112311 {1,1,2,3,1,1}; +}; + +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_access, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("access")) +{ + + namespace ublas = boost::numeric::ublas; + +// BOOST_REQUIRE_EQUAL(ublas::size(de), 0); +// BOOST_CHECK (ublas::empty(de) ); + + BOOST_REQUIRE_EQUAL(ublas::size(de11) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de12) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de21) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de23) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(de231) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de123) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de1123) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de12311) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de423) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de4213) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de42131) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de142131), 6); + BOOST_REQUIRE_EQUAL(ublas::size(de141) , 3); + BOOST_REQUIRE_EQUAL(ublas::size(de1111) , 4); + BOOST_REQUIRE_EQUAL(ublas::size(de14111) , 5); + BOOST_REQUIRE_EQUAL(ublas::size(de112111), 6); + BOOST_REQUIRE_EQUAL(ublas::size(de112311), 6); + + + BOOST_CHECK_EQUAL(de11[0],1); + BOOST_CHECK_EQUAL(de11[1],1); + + BOOST_CHECK_EQUAL(de12[0],1); + BOOST_CHECK_EQUAL(de12[1],2); + + BOOST_CHECK_EQUAL(de21[0],2); + BOOST_CHECK_EQUAL(de21[1],1); + + BOOST_CHECK_EQUAL(de23[0],2); + BOOST_CHECK_EQUAL(de23[1],3); + + BOOST_CHECK_EQUAL(de231[0],2); + BOOST_CHECK_EQUAL(de231[1],3); + BOOST_CHECK_EQUAL(de231[2],1); + + BOOST_CHECK_EQUAL(de123[0],1); + BOOST_CHECK_EQUAL(de123[1],2); + BOOST_CHECK_EQUAL(de123[2],3); + + BOOST_CHECK_EQUAL(de1123[0],1); + BOOST_CHECK_EQUAL(de1123[1],1); + BOOST_CHECK_EQUAL(de1123[2],2); + BOOST_CHECK_EQUAL(de1123[3],3); + + BOOST_CHECK_EQUAL(de12311[0],1); + BOOST_CHECK_EQUAL(de12311[1],2); + 
BOOST_CHECK_EQUAL(de12311[2],3); + BOOST_CHECK_EQUAL(de12311[3],1); + BOOST_CHECK_EQUAL(de12311[4],1); + + BOOST_CHECK_EQUAL(de423[0],4); + BOOST_CHECK_EQUAL(de423[1],2); + BOOST_CHECK_EQUAL(de423[2],3); + + BOOST_CHECK_EQUAL(de4213[0],4); + BOOST_CHECK_EQUAL(de4213[1],2); + BOOST_CHECK_EQUAL(de4213[2],1); + BOOST_CHECK_EQUAL(de4213[3],3); + + BOOST_CHECK_EQUAL(de42131[0],4); + BOOST_CHECK_EQUAL(de42131[1],2); + BOOST_CHECK_EQUAL(de42131[2],1); + BOOST_CHECK_EQUAL(de42131[3],3); + BOOST_CHECK_EQUAL(de42131[4],1); + + BOOST_CHECK_EQUAL(de142131[0],1); + BOOST_CHECK_EQUAL(de142131[1],4); + BOOST_CHECK_EQUAL(de142131[2],2); + BOOST_CHECK_EQUAL(de142131[3],1); + BOOST_CHECK_EQUAL(de142131[4],3); + BOOST_CHECK_EQUAL(de142131[5],1); + + BOOST_CHECK_EQUAL(de141[0],1); + BOOST_CHECK_EQUAL(de141[1],4); + BOOST_CHECK_EQUAL(de141[2],1); + + BOOST_CHECK_EQUAL(de1111[0],1); + BOOST_CHECK_EQUAL(de1111[1],1); + BOOST_CHECK_EQUAL(de1111[2],1); + BOOST_CHECK_EQUAL(de1111[3],1); + + BOOST_CHECK_EQUAL(de14111[0],1); + BOOST_CHECK_EQUAL(de14111[1],4); + BOOST_CHECK_EQUAL(de14111[2],1); + BOOST_CHECK_EQUAL(de14111[3],1); + BOOST_CHECK_EQUAL(de14111[4],1); + + BOOST_CHECK_EQUAL(de112111[0],1); + BOOST_CHECK_EQUAL(de112111[1],1); + BOOST_CHECK_EQUAL(de112111[2],2); + BOOST_CHECK_EQUAL(de112111[3],1); + BOOST_CHECK_EQUAL(de112111[4],1); + BOOST_CHECK_EQUAL(de112111[5],1); + + BOOST_CHECK_EQUAL(de112311[0],1); + BOOST_CHECK_EQUAL(de112311[1],1); + BOOST_CHECK_EQUAL(de112311[2],2); + BOOST_CHECK_EQUAL(de112311[3],3); + BOOST_CHECK_EQUAL(de112311[4],1); + BOOST_CHECK_EQUAL(de112311[5],1); +} + +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_copy_ctor, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("copy_ctor")) +{ + namespace ublas = boost::numeric::ublas; + +// auto e = de; + auto e1 = de11; + auto e12 = de12; + auto e21 = de21; + auto e23 = de23; + auto e231 = de231; + auto e123 = de123; + auto e1123 = de1123; + auto e12311 = de12311; + auto e423 = 
de423; + auto e4213 = de4213; + auto e42131 = de42131; + auto e142131 = de142131; + auto e141 = de141; + auto e1111 = de1111; + auto e14111 = de14111; + auto e112111 = de112111; + auto e112311 = de112311; + + +// BOOST_CHECK (ublas::empty(e) ); + +// BOOST_REQUIRE_EQUAL(ublas::size(e) , 0); + BOOST_REQUIRE_EQUAL(ublas::size(e1) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e12) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e21) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e23) , 2); + BOOST_REQUIRE_EQUAL(ublas::size(e231), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e123), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e1123), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e12311), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e423), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e4213), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e42131), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e142131), 6); + BOOST_REQUIRE_EQUAL(ublas::size(e141), 3); + BOOST_REQUIRE_EQUAL(ublas::size(e1111), 4); + BOOST_REQUIRE_EQUAL(ublas::size(e14111), 5); + BOOST_REQUIRE_EQUAL(ublas::size(e112111), 6); + BOOST_REQUIRE_EQUAL(ublas::size(e112311), 6); + + + BOOST_CHECK_EQUAL(e1[0],1); + BOOST_CHECK_EQUAL(e1[1],1); + + BOOST_CHECK_EQUAL(e12[0],1); + BOOST_CHECK_EQUAL(e12[1],2); + + BOOST_CHECK_EQUAL(e21[0],2); + BOOST_CHECK_EQUAL(e21[1],1); + + BOOST_CHECK_EQUAL(e23[0],2); + BOOST_CHECK_EQUAL(e23[1],3); + + BOOST_CHECK_EQUAL(e231[0],2); + BOOST_CHECK_EQUAL(e231[1],3); + BOOST_CHECK_EQUAL(e231[2],1); + + BOOST_CHECK_EQUAL(e123[0],1); + BOOST_CHECK_EQUAL(e123[1],2); + BOOST_CHECK_EQUAL(e123[2],3); + + BOOST_CHECK_EQUAL(e1123[0],1); + BOOST_CHECK_EQUAL(e1123[1],1); + BOOST_CHECK_EQUAL(e1123[2],2); + BOOST_CHECK_EQUAL(e1123[3],3); + + BOOST_CHECK_EQUAL(e12311[0],1); + BOOST_CHECK_EQUAL(e12311[1],2); + BOOST_CHECK_EQUAL(e12311[2],3); + BOOST_CHECK_EQUAL(e12311[3],1); + BOOST_CHECK_EQUAL(e12311[4],1); + + BOOST_CHECK_EQUAL(e423[0],4); + BOOST_CHECK_EQUAL(e423[1],2); + BOOST_CHECK_EQUAL(e423[2],3); + + BOOST_CHECK_EQUAL(e4213[0],4); + BOOST_CHECK_EQUAL(e4213[1],2); + 
BOOST_CHECK_EQUAL(e4213[2],1); + BOOST_CHECK_EQUAL(e4213[3],3); + + BOOST_CHECK_EQUAL(e42131[0],4); + BOOST_CHECK_EQUAL(e42131[1],2); + BOOST_CHECK_EQUAL(e42131[2],1); + BOOST_CHECK_EQUAL(e42131[3],3); + BOOST_CHECK_EQUAL(e42131[4],1); + + BOOST_CHECK_EQUAL(e142131[0],1); + BOOST_CHECK_EQUAL(e142131[1],4); + BOOST_CHECK_EQUAL(e142131[2],2); + BOOST_CHECK_EQUAL(e142131[3],1); + BOOST_CHECK_EQUAL(e142131[4],3); + BOOST_CHECK_EQUAL(e142131[5],1); + + BOOST_CHECK_EQUAL(e141[0],1); + BOOST_CHECK_EQUAL(e141[1],4); + BOOST_CHECK_EQUAL(e141[2],1); + + BOOST_CHECK_EQUAL(e1111[0],1); + BOOST_CHECK_EQUAL(e1111[1],1); + BOOST_CHECK_EQUAL(e1111[2],1); + BOOST_CHECK_EQUAL(e1111[3],1); + + BOOST_CHECK_EQUAL(e14111[0],1); + BOOST_CHECK_EQUAL(e14111[1],4); + BOOST_CHECK_EQUAL(e14111[2],1); + BOOST_CHECK_EQUAL(e14111[3],1); + BOOST_CHECK_EQUAL(e14111[4],1); + + BOOST_CHECK_EQUAL(e112111[0],1); + BOOST_CHECK_EQUAL(e112111[1],1); + BOOST_CHECK_EQUAL(e112111[2],2); + BOOST_CHECK_EQUAL(e112111[3],1); + BOOST_CHECK_EQUAL(e112111[4],1); + BOOST_CHECK_EQUAL(e112111[5],1); + + BOOST_CHECK_EQUAL(e112311[0],1); + BOOST_CHECK_EQUAL(e112311[1],1); + BOOST_CHECK_EQUAL(e112311[2],2); + BOOST_CHECK_EQUAL(e112311[3],3); + BOOST_CHECK_EQUAL(e112311[4],1); + BOOST_CHECK_EQUAL(e112311[5],1); + +} + +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_is, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("query")) +{ + namespace ublas = boost::numeric::ublas; + + +// auto e = de; + auto e11 = de11; + auto e12 = de12; + auto e21 = de21; + auto e23 = de23; + auto e231 = de231; + auto e123 = de123; + auto e1123 = de1123; + auto e12311 = de12311; + auto e423 = de423; + auto e4213 = de4213; + auto e42131 = de42131; + auto e142131 = de142131; + auto e141 = de141; + auto e1111 = de1111; + auto e14111 = de14111; + auto e112111 = de112111; + auto e112311 = de112311; + +// BOOST_CHECK( ublas::empty (e)); +// BOOST_CHECK( ! ublas::is_scalar(e)); +// BOOST_CHECK( ! 
ublas::is_vector(e)); +// BOOST_CHECK( ! ublas::is_matrix(e)); +// BOOST_CHECK( ! ublas::is_tensor(e)); + + BOOST_CHECK( ! ublas::empty (e11) ); + BOOST_CHECK( ublas::is_scalar(e11) ); + BOOST_CHECK( ublas::is_vector(e11) ); + BOOST_CHECK( ublas::is_matrix(e11) ); + BOOST_CHECK( ! ublas::is_tensor(e11) ); + + BOOST_CHECK( ! ublas::empty (e12) ); + BOOST_CHECK( ! ublas::is_scalar(e12) ); + BOOST_CHECK( ublas::is_vector(e12) ); + BOOST_CHECK( ublas::is_matrix(e12) ); + BOOST_CHECK( ! ublas::is_tensor(e12) ); + + BOOST_CHECK( ! ublas::empty (e21) ); + BOOST_CHECK( ! ublas::is_scalar(e21) ); + BOOST_CHECK( ublas::is_vector(e21) ); + BOOST_CHECK( ublas::is_matrix(e21) ); + BOOST_CHECK( ! ublas::is_tensor(e21) ); + + BOOST_CHECK( ! ublas::empty (e23) ); + BOOST_CHECK( ! ublas::is_scalar(e23) ); + BOOST_CHECK( ! ublas::is_vector(e23) ); + BOOST_CHECK( ublas::is_matrix(e23) ); + BOOST_CHECK( ! ublas::is_tensor(e23) ); + + BOOST_CHECK( ! ublas::empty (e231) ); + BOOST_CHECK( ! ublas::is_scalar(e231) ); + BOOST_CHECK( ! ublas::is_vector(e231) ); + BOOST_CHECK( ublas::is_matrix(e231) ); + BOOST_CHECK( ! ublas::is_tensor(e231) ); + + BOOST_CHECK( ! ublas::empty (e123) ); + BOOST_CHECK( ! ublas::is_scalar(e123) ); + BOOST_CHECK( ! ublas::is_vector(e123) ); + BOOST_CHECK( ! ublas::is_matrix(e123) ); + BOOST_CHECK( ublas::is_tensor(e123) ); + + BOOST_CHECK( ! ublas::empty (e1123) ); + BOOST_CHECK( ! ublas::is_scalar(e1123) ); + BOOST_CHECK( ! ublas::is_vector(e1123) ); + BOOST_CHECK( ! ublas::is_matrix(e1123) ); + BOOST_CHECK( ublas::is_tensor(e1123) ); + + BOOST_CHECK( ! ublas::empty (e12311) ); + BOOST_CHECK( ! ublas::is_scalar(e12311) ); + BOOST_CHECK( ! ublas::is_vector(e12311) ); + BOOST_CHECK( ! ublas::is_matrix(e12311) ); + BOOST_CHECK( ublas::is_tensor(e12311) ); + + BOOST_CHECK( ! ublas::empty (e423) ); + BOOST_CHECK( ! ublas::is_scalar(e423) ); + BOOST_CHECK( ! ublas::is_vector(e423) ); + BOOST_CHECK( ! 
ublas::is_matrix(e423) ); + BOOST_CHECK( ublas::is_tensor(e423) ); + + BOOST_CHECK( ! ublas::empty (e4213) ); + BOOST_CHECK( ! ublas::is_scalar(e4213) ); + BOOST_CHECK( ! ublas::is_vector(e4213) ); + BOOST_CHECK( ! ublas::is_matrix(e4213) ); + BOOST_CHECK( ublas::is_tensor(e4213) ); + + BOOST_CHECK( ! ublas::empty (e42131) ); + BOOST_CHECK( ! ublas::is_scalar(e42131) ); + BOOST_CHECK( ! ublas::is_vector(e42131) ); + BOOST_CHECK( ! ublas::is_matrix(e42131) ); + BOOST_CHECK( ublas::is_tensor(e42131) ); + + BOOST_CHECK( ! ublas::empty (e142131) ); + BOOST_CHECK( ! ublas::is_scalar(e142131) ); + BOOST_CHECK( ! ublas::is_vector(e142131) ); + BOOST_CHECK( ! ublas::is_matrix(e142131) ); + BOOST_CHECK( ublas::is_tensor(e142131) ); + + BOOST_CHECK( ! ublas::empty (e141) ); + BOOST_CHECK( ! ublas::is_scalar(e141) ); + BOOST_CHECK( ublas::is_vector(e141) ); + BOOST_CHECK( ublas::is_matrix(e141) ); + BOOST_CHECK( ! ublas::is_tensor(e141) ); + + BOOST_CHECK( ! ublas::empty (e1111) ); + BOOST_CHECK( ublas::is_scalar(e1111) ); + BOOST_CHECK( ublas::is_vector(e1111) ); + BOOST_CHECK( ublas::is_matrix(e1111) ); + BOOST_CHECK( ! ublas::is_tensor(e1111) ); + + BOOST_CHECK( ! ublas::empty (e14111) ); + BOOST_CHECK( ! ublas::is_scalar(e14111) ); + BOOST_CHECK( ublas::is_vector(e14111) ); + BOOST_CHECK( ublas::is_matrix(e14111) ); + BOOST_CHECK( ! ublas::is_tensor(e14111) ); + + BOOST_CHECK( ! ublas::empty (e112111) ); + BOOST_CHECK( ! ublas::is_scalar(e112111) ); + BOOST_CHECK( ! ublas::is_vector(e112111) ); + BOOST_CHECK( ! ublas::is_matrix(e112111) ); + BOOST_CHECK( ublas::is_tensor(e112111) ); + + BOOST_CHECK( ! ublas::empty (e112311) ); + BOOST_CHECK( ! ublas::is_scalar(e112311) ); + BOOST_CHECK( ! ublas::is_vector(e112311) ); + BOOST_CHECK( ! 
ublas::is_matrix(e112311) ); + BOOST_CHECK( ublas::is_tensor(e112311) ); +} + +//BOOST_FIXTURE_TEST_CASE(test_extents_static_size_squeeze, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("squeeze")) +//{ +// auto e1 = squeeze(de1); // {1,1} +// auto e2 = squeeze(de2); // {1,2} +// auto 21 = squeeze(d21); // {2,1} + +// auto e4 = squeeze(de4); // {2,3} +// auto e231 = squeeze(de231); // {2,3} +// auto e123 = squeeze(de123); // {2,3} +// auto e1123 = squeeze(de1123); // {2,3} +// auto e12311 = squeeze(de12311); // {2,3} + +// auto e423 = squeeze(de423); // {4,2,3} +// auto e4213 = squeeze(de4213); // {4,2,3} +// auto e11 = squeeze(de11); // {4,2,3} +// auto e12 = squeeze(e142131); // {4,2,3} + +// auto e141 = squeeze(de141); // {1,4} +// auto e1111 = squeeze(de1111); // {1,1} +// auto e14111 = squeeze(de14111); // {1,4} +// auto e112111 = squeeze(de112111); // {2,1} +// auto e112311 = squeeze(de112311); // {2,3} + +// BOOST_CHECK( (e1 == extents<2>{1,1}) ); +// BOOST_CHECK( (e2 == extents<2>{1,2}) ); +// BOOST_CHECK( (21 == extents<2>{2,1}) ); + +// BOOST_CHECK( (e4 == extents<2>{2,3}) ); +// BOOST_CHECK( (e231 == extents<2>{2,3}) ); +// BOOST_CHECK( (e123 == extents<2>{2,3}) ); +// BOOST_CHECK( (e1123 == extents<2>{2,3}) ); +// BOOST_CHECK( (e12311 == extents<2>{2,3}) ); + +// BOOST_CHECK( (e423 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e4213 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e11 == extents<3>{4,2,3}) ); +// BOOST_CHECK( (e12 == extents<3>{4,2,3}) ); + +// BOOST_CHECK( (e141 == extents<2>{1,4}) ); +// BOOST_CHECK( (e1111 == extents<2>{1,1}) ); +// BOOST_CHECK( (e14111 == extents<2>{1,4}) ); +// BOOST_CHECK( (e112111 == extents<2>{2,1}) ); +// BOOST_CHECK( (e112311 == extents<2>{2,3}) ); + +//} + + +BOOST_FIXTURE_TEST_CASE(test_extents_static_size_product, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("product")) +{ + namespace ublas = boost::numeric::ublas; + +// auto e = 
ublas::product( de ); + auto e11 = ublas::product( de11 ); + auto e12 = ublas::product( de12 ); + auto e21 = ublas::product( de21 ); + auto e23 = ublas::product( de23 ); + auto e231 = ublas::product( de231 ); + auto e123 = ublas::product( de123 ); + auto e1123 = ublas::product( de1123 ); + auto e12311 = ublas::product( de12311 ); + auto e423 = ublas::product( de423 ); + auto e4213 = ublas::product( de4213 ); + auto e42131 = ublas::product( de42131 ); + auto e142131 = ublas::product( de142131 ); + auto e141 = ublas::product( de141 ); + auto e1111 = ublas::product( de1111 ); + auto e14111 = ublas::product( de14111 ); + auto e112111 = ublas::product( de112111 ); + auto e112311 = ublas::product( de112311 ); + +// BOOST_CHECK_EQUAL( e , 0 ); + BOOST_CHECK_EQUAL( e11 , 1 ); + BOOST_CHECK_EQUAL( e12 , 2 ); + BOOST_CHECK_EQUAL( e21 , 2 ); + BOOST_CHECK_EQUAL( e23 , 6 ); + BOOST_CHECK_EQUAL( e231 , 6 ); + BOOST_CHECK_EQUAL( e123 , 6 ); + BOOST_CHECK_EQUAL( e1123 , 6 ); + BOOST_CHECK_EQUAL( e12311 , 6 ); + BOOST_CHECK_EQUAL( e423 , 24 ); + BOOST_CHECK_EQUAL( e4213 , 24 ); + BOOST_CHECK_EQUAL( e42131 , 24 ); + BOOST_CHECK_EQUAL( e142131, 24 ); + BOOST_CHECK_EQUAL( e141 , 4 ); + BOOST_CHECK_EQUAL( e1111 , 1 ); + BOOST_CHECK_EQUAL( e14111 , 4 ); + BOOST_CHECK_EQUAL( e112111, 2 ); + BOOST_CHECK_EQUAL( e112311, 6 ); + + +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_matrix_vector.cpp b/test/tensor/test_subtensor_matrix_vector.cpp new file mode 100644 index 000000000..e988d9f75 --- /dev/null +++ b/test/tensor/test_subtensor_matrix_vector.cpp @@ -0,0 +1,472 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + +#include +#include +#include +#include +#include + +#include "utility.hpp" + +BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank_matrix_interoperability ) + +using test_types = zip::with_t; + + +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout = typename value::second_type; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + auto a2 = tensor( matrix(1,1) ); + BOOST_CHECK_EQUAL( a2.size() , 1 ); + BOOST_CHECK( !a2.empty() ); + BOOST_CHECK_NE( a2.data() , nullptr); + + auto a3 = tensor( matrix(2,1) ); + BOOST_CHECK_EQUAL( a3.size() , 2 ); + BOOST_CHECK( !a3.empty() ); + BOOST_CHECK_NE( a3.data() , nullptr); + + auto a4 = tensor( matrix(1,2) ); + BOOST_CHECK_EQUAL( a4.size() , 2 ); + BOOST_CHECK( !a4.empty() ); + BOOST_CHECK_NE( a4.data() , nullptr); + + auto a5 = tensor( matrix(2,3) ); + BOOST_CHECK_EQUAL( a5.size() , 6 ); + BOOST_CHECK( !a5.empty() ); + BOOST_CHECK_NE( a5.data() , nullptr); +} + + +BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; + + auto a2 = tensor_type( vector_type(1) ); + BOOST_CHECK_EQUAL( a2.size() , 1 ); + BOOST_CHECK( !a2.empty() ); + BOOST_CHECK_NE( a2.data() , nullptr); + + auto a3 = tensor_type( vector_type(2) ); + BOOST_CHECK_EQUAL( a3.size() , 2 ); + BOOST_CHECK( !a3.empty() ); + BOOST_CHECK_NE( a3.data() , nullptr); + + auto a4 = tensor_type( vector_type(2) ); + 
BOOST_CHECK_EQUAL( a4.size() , 2 ); + BOOST_CHECK( !a4.empty() ); + BOOST_CHECK_NE( a4.data() , nullptr); + + auto a5 = tensor_type( vector_type(3) ); + BOOST_CHECK_EQUAL( a5.size() , 3 ); + BOOST_CHECK( !a5.empty() ); + BOOST_CHECK_NE( a5.data() , nullptr); +} + + +struct fixture +{ + template + using extents_type = boost::numeric::ublas::extents; + + std::tuple< + extents_type<2>, // 0 + extents_type<2>, // 1 + extents_type<2>, // 2 + extents_type<2>, // 3 + extents_type<2> // 4 + > extents = { + {1,1}, + {1,2}, + {2,1}, + {6,6}, + {9,7}, + }; +}; + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + assert(ublas::size(e)==2); + tensor t = matrix{e[0],e[1]}; + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + }; + + for_each_in_tuple(extents,check); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using vector = typename tensor::vector_type; + + assert(ublas::size(e)==2); + if(ublas::empty(e)) + return; + + tensor t = vector (product(e)); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , 
nullptr); + }; + + for_each_in_tuple(extents,check); +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + + for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { + using etype = std::decay_t; + constexpr auto size = std::tuple_size_v; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor{e[1],e[0]}; + auto r = matrix(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r; + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), r(i,j) ); + } + } + }); + + //for_each_in_tuple(extents,check); +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r; + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + 
BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t[i], r(i) ); + } + }; + + for_each_in_tuple(extents,check); +} + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using matrix_type = typename tensor_type::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = matrix_type(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + auto q = r; + t = std::move(r); + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + BOOST_CHECK_EQUAL( t.at(i,j), q(i,j) ); + } + } + }; + + for_each_in_tuple(extents,check); +} + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + auto q = r; + t = std::move(r); + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * 
e.at(1)); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t[i], q(i) ); + } + }; + + for_each_in_tuple(extents,check); +} + + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using matrix_type = typename tensor_type::matrix_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = matrix_type(e[0],e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r + 3*r; + tensor_type s = r + 3*r; + tensor_type q = s + r + 3*r + s; // + 3*r + + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); + BOOST_CHECK ( !s.empty() ); + BOOST_CHECK_NE ( s.data() , nullptr); + + BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); + BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); + BOOST_CHECK ( !q.empty() ); + BOOST_CHECK_NE ( q.data() , nullptr); + + + for(auto j = 0ul; j < t.size(1); ++j){ + for(auto i = 0ul; i < t.size(0); ++i){ + 
BOOST_CHECK_EQUAL( t.at(i,j), 4*r(i,j) ); + BOOST_CHECK_EQUAL( s.at(i,j), t.at(i,j) ); + BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); + } + } + }; + + for_each_in_tuple(extents,check); +} + + + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor_type = ublas::tensor_static_rank; + using vector_type = typename tensor_type::vector_type; + + assert(ublas::size(e) == 2); + auto t = tensor_type{e[1],e[0]}; + auto r = vector_type(e[0]*e[1]); + std::iota(r.data().begin(),r.data().end(), 1); + t = r + 3*r; + tensor_type s = r + 3*r; + tensor_type q = s + r + 3*r + s; // + 3*r + + + BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); + BOOST_CHECK ( !t.empty() ); + BOOST_CHECK_NE ( t.data() , nullptr); + + BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); + BOOST_CHECK ( !s.empty() ); + BOOST_CHECK_NE ( s.data() , nullptr); + + BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0)*e.at(1) ); + BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); + BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); + BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); + BOOST_CHECK ( !q.empty() ); + BOOST_CHECK_NE ( q.data() , nullptr); + + + + for(auto i = 0ul; i < t.size(); ++i){ + BOOST_CHECK_EQUAL( t.at(i), 4*r(i) ); + BOOST_CHECK_EQUAL( s.at(i), t.at(i) ); + BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); + } + }; + + for_each_in_tuple(extents,check); +} + + + 
+BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value = typename pair::first_type; + using layout = typename pair::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) { + constexpr auto size = std::tuple_size_v>; + using tensor = ublas::tensor_static_rank; + using matrix = typename tensor::matrix_type; + using vector = typename tensor::vector_type; + + if(product(e) <= 2) + return; + assert(ublas::size(e) == 2); + auto Q = tensor{e[0],1}; + auto A = matrix(e[0],e[1]); + auto b = vector(e[1]); + auto c = vector(e[0]); + std::iota(b.data().begin(),b.data().end(), 1); + std::fill(A.data().begin(),A.data().end(), 1); + std::fill(c.data().begin(),c.data().end(), 2); + std::fill(Q.begin(),Q.end(), 2); + + tensor T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; + + BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); + BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); + BOOST_CHECK_EQUAL ( T.size() , Q.size() ); + BOOST_CHECK_EQUAL ( T.size() , c.size() ); + BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); + BOOST_CHECK ( !T.empty() ); + BOOST_CHECK_NE ( T.data() , nullptr); + + const auto n = e[1]; + const auto ab = value(std::div(n*(n+1),2).quot); + const auto ref = ab+4*Q(0)+2*c(0); + BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) ); + +// for(auto i = 0ul; i < T.size(); ++i){ +// auto n = e[1]; +// auto ab = n * (n+1) / 2; +// BOOST_CHECK_EQUAL( T(i), ab+4*Q(0)+2*c(0) ); +// } + + }; + for_each_in_tuple(extents,check); +} + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_operators_arithmetic.cpp b/test/tensor/test_subtensor_operators_arithmetic.cpp new file mode 100644 index 000000000..08ef6b8f9 --- /dev/null +++ b/test/tensor/test_subtensor_operators_arithmetic.cpp @@ -0,0 +1,238 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit 
Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + + +#include + +#include +#include +#include "utility.hpp" + +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_arithmetic_operations) + +using double_extended = boost::multiprecision::cpp_bin_float_double_extended; + +using test_types = zip::with_t; + +struct fixture +{ + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 + > extents = { + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; +}; + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + r = t + t + t + t2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3*t(i) + t2(i) ); + + + r = t2 / (t+3) * (t+1) - t2; // r = ( t2/ ((t+3)*(t+1)) ) - t2 + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t2(i) / (t(i)+3)*(t(i)+1) - t2(i) ); + + r = 3+t2 / (t+3) * (t+1) * t - t2; // r = 3+( t2/ ((t+3)*(t+1)*t) ) - t2 + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (t(i)+3)*(t(i)+1)*t(i) - t2(i) ); + + r = t2 - t + t2 - t; + + for(auto i = 0ul; i < 
r.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 4 ); + + + r = t * t * t * t2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t(i)*t(i)*t(i)*t2(i) ); + + r = (t2/t2) * (t2/t2); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 1 ); + }; + + for_each_in_tuple(extents,check); +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + tensor_t r1 = t + 2 + t + 2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r1(i), 2*t(i) + 4 ); + + tensor_t r2 = 2 + t + 2 + t; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r2(i), 2*t(i) + 4 ); + + tensor_t r3 = (t-2) + (t-2); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r3(i), 2*t(i) - 4 ); + + tensor_t r4 = (t*2) * (3*t); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r4(i), 2*3*t(i)*t(i) ); + + tensor_t r5 = (t2*2) / (2*t2) * t2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + + tensor_t r6 = (t2/2+1) / (2/t2+1) / t2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + + }; + + for_each_in_tuple(extents,check); +} + + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) + { + 
constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + r = t + 2; + r += t; + r += 2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + + r = 2 + t; + r += t; + r += 2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + + r = (t-2); + r += t; + r -= 2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*t(i) - 4 ); + + r = (t*2); + r *= 3; + r *= t; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*3*t(i)*t(i) ); + + r = (t2*2); + r /= 2; + r /= t2; + r *= t2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + + r = (t2/2+1); + r /= (2/t2+1); + r /= t2; + + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + + tensor_t q = -r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( q(i), -r(i) ); + + tensor_t p = +r; + for(auto i = 0ul; i < t.size(); ++i) + BOOST_CHECK_EQUAL ( p(i), r(i) ); + }; + + for_each_in_tuple(extents,check); +} + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_operators_comparison.cpp b/test/tensor/test_subtensor_operators_comparison.cpp new file mode 100644 index 000000000..1eb3c4396 --- /dev/null +++ b/test/tensor/test_subtensor_operators_comparison.cpp @@ -0,0 +1,197 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + + +#include +#include +#include +#include "utility.hpp" + +BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_comparison) + +using double_extended = boost::multiprecision::cpp_bin_float_double_extended; + +using test_types = zip::with_t; + +struct fixture { + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 + > extents = { + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; +}; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check = [](auto const& /*unused*/, auto& e) + { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK(!(t < t)); + BOOST_CHECK(!(t > t)); + BOOST_CHECK( t < t2 ); + BOOST_CHECK( t2 > t ); + BOOST_CHECK( t <= t ); + BOOST_CHECK( t >= t ); + BOOST_CHECK( t <= t2 ); + BOOST_CHECK( t2 >= t ); + BOOST_CHECK( t2 >= t2 ); + BOOST_CHECK( t2 >= t ); + }; + + for_each_in_tuple(extents,check); + +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + for_each_in_tuple(extents,[](auto 
const& /*unused*/, auto& e) { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( t == t ); + BOOST_CHECK( t != t2 ); + + if(t.empty()) + return; + + BOOST_CHECK( !(t < t) ); + BOOST_CHECK( !(t > t) ); + BOOST_CHECK( t < (t2+t) ); + BOOST_CHECK( (t2+t) > t ); + BOOST_CHECK( t <= (t+t) ); + BOOST_CHECK( (t+t2) >= t ); + BOOST_CHECK( (t2+t2+2) >= t); + BOOST_CHECK( 2*t2 > t ); + BOOST_CHECK( t < 2*t2 ); + BOOST_CHECK( 2*t2 > t); + BOOST_CHECK( 2*t2 >= t2 ); + BOOST_CHECK( t2 <= 2*t2); + BOOST_CHECK( 3*t2 >= t ); + }); + + +} + + + +//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) +//{ +// namespace ublas = boost::numeric::ublas; +// using value_t = typename value::first_type; +// using layout_t = typename value::second_type; + + +// for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { +// using extents_t = std::decay_t; +// using tensor_t = ublas::tensor_static_rank, layout_t>; + +// BOOST_CHECK( tensor_t(e,value_t{2}) == tensor_t(e,value_t{2}) ); +// BOOST_CHECK( tensor_t(e,value_t{2}) != tensor_t(e,value_t{1}) ); + +// if(ublas::empty(e)) +// return; + +// BOOST_CHECK( !(tensor_t(e,2) < 2) ); +// BOOST_CHECK( !(tensor_t(e,2) > 2) ); +// BOOST_CHECK( (tensor_t(e,2) >= 2) ); +// BOOST_CHECK( (tensor_t(e,2) <= 2) ); +// BOOST_CHECK( (tensor_t(e,2) == 2) ); +// BOOST_CHECK( (tensor_t(e,2) != 3) ); + +// BOOST_CHECK( !(2 > tensor_t(e,2)) ); +// BOOST_CHECK( !(2 < tensor_t(e,2)) ); +// BOOST_CHECK( (2 <= tensor_t(e,2)) ); +// BOOST_CHECK( (2 >= tensor_t(e,2)) ); +// BOOST_CHECK( (2 == tensor_t(e,2)) ); +// BOOST_CHECK( (3 != tensor_t(e,2)) ); + +// BOOST_CHECK( !( tensor_t(e,2)+3 < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+3 > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 >= 5) ); +// BOOST_CHECK( ( 
tensor_t(e,2)+3 <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 != 6) ); + + +// BOOST_CHECK( !( 5 > tensor_t(e,2)+3) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+3) ); + + +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) != 6) ); + + +// BOOST_CHECK( !( 5 > tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+tensor_t(e,3)) ); + +// }); + +//} + + +BOOST_AUTO_TEST_SUITE_END() From 740fd93e4dd4a3e4b457ef2effb87da433b954e5 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Sun, 22 Aug 2021 22:59:00 +0530 Subject: [PATCH 34/40] Fixed subtensor of subtensor creation --- examples/tensor/access_subtensor.cpp | 20 ++++-- include/boost/numeric/ublas/tensor/access.hpp | 4 +- include/boost/numeric/ublas/tensor/span.hpp | 9 --- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 65 ++++++------------- .../ublas/tensor/tensor/subtensor_engine.hpp | 1 + .../tensor/tensor/subtensor_static_rank.hpp | 64 ++++++------------ test/tensor/test_expression_evaluation.cpp | 4 +- 7 files changed, 56 insertions(+), 111 deletions(-) diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index fbe994175..6a90f14bd 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -25,6 +25,7 @@ int main() using 
layout = ublas::layout::first_order; // storage format using tensor = ublas::tensor_dynamic; using span = ublas::span<>; + using subtensor = typename tensor::subtensor_type; constexpr auto ones = ublas::ones{}; @@ -42,20 +43,25 @@ int main() } } } - auto A = t1(span(1), span(0,2,2), span()); - auto B = A(span(), span(), span()); - A += B; + auto A = subtensor(t1, span(1,1,2), span(0,2,2), span()); + auto B = subtensor(A, span(), span(), span(1)); std::cout << "% --------------------------- " << std::endl; - for (auto x: B.extents().base()) { + for (auto x: A.extents()) { std::cout << x << " "; } - tensor t2 = ones(1,2,2); - auto t3 = ublas::inner_prod(A, t2); + std::cout << std::endl; + std::cout << "% --------------------------- " << std::endl; + for (auto x: B.extents()) { + std::cout << x << " "; + } + std::cout << std::endl; + tensor t2 = ones(2,2,1); + auto t3 = ublas::inner_prod(B, t2); // // // formatted output // std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "t1=" << t1 << ";" << std::endl << std::endl; - std::cout << "A=" << A << ";" << std::endl << std::endl; + std::cout << "B=" << B << ";" << std::endl << std::endl; std::cout << "t2=" << t2 << ";" << std::endl << std::endl; std::cout << "t3=" << t3 << ";" << std::endl << std::endl; } catch (const std::exception& e) { diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index 98d81de4f..525282fd9 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -184,9 +184,9 @@ constexpr inline void compute_multi_index(std::size_t j, InputIt1 w, InputIt1 /* * @param v begin input iterator of a container with subtensor strides of length std::distance(w,wp) or greater */ template -constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, InputIt1 wp, InputIt2 v, std::size_t offset) +constexpr inline auto compute_single_index(std::size_t jv, InputIt1 w, 
InputIt1 wp, InputIt2 v) { - return std::inner_product(w,wp,v,offset, + return std::inner_product(w,wp,v,0ul, std::plus<>{}, [&jv](auto ww, auto vv) { auto k=jv/vv; jv-=vv*k; return ww*k; } ); diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 2962fb04c..81165c4eb 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -110,15 +110,6 @@ class span return (last_-first_) / step_ + value_type(1); } - inline span operator()(const span &rhs) const - { - auto const& lhs = *this; - return span( - rhs.first_*lhs.step_ + lhs.first_, - lhs.step_ *rhs.step_, - std::min(rhs.last_,size()) *lhs.step_ + lhs.first_ ); - } - protected: value_type first_, step_, last_ ; diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index a7b1c6c85..c32654416 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -1,4 +1,5 @@ // +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com // // Distributed under the Boost Software License, Version 1.0. 
(See @@ -104,8 +105,7 @@ class tensor_core>> , _extents(t.extents()) , _strides(t.strides()) , _span_strides(t.strides()) - , _offset(size_type(0)) - , _tensor(t) + , _data(t.data()) { } @@ -116,8 +116,7 @@ class tensor_core>> , _extents{} , _strides{detail::to_span_strides(t.strides(), _spans)} , _span_strides{} - , _offset{detail::to_offset(t.strides(), _spans)} - , _tensor(t) + , _data{t.data() + detail::to_offset(t.strides(), _spans)} { _extents = detail::to_extents(_spans); _span_strides = ublas::to_strides(_extents,layout_type{}); @@ -133,32 +132,15 @@ class tensor_core>> std::cout << _strides[i] << " "; } std::cout << std::endl; - std::cout << _offset << std::endl; } - // TODO - // template - // tensor_core(const tensor_core& t, FS&& first, SL&&... spans) - // : tensor_expression_type{} - // , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) - // , _extents{} - // , _strides{} - // , _span_strides(detail::to_span_strides(t.strides(), _spans)) - // , _offset{detail::to_offset(t.strides(), _spans)} - // , _tensor(t._tensor) - // { - // _extents = detail::to_extents(_spans); - // _strides = ublas::to_strides(_extents,layout_type{}); - // } - tensor_core(tensor_core&& v) : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) , _span_strides(std::move(v._span_strides)) - , _offset(std::move(v._offset)) - , _tensor(std::move(v._tensor)) + , _data(std::move(v._data)) { _extents = detail::to_extents(_spans); } @@ -218,7 +200,7 @@ class tensor_core>> "Number of provided indices does not match with tensor order."); } const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor[idx + _offset]; + return _data[idx]; } /** @brief Element access using a multi-index with bound checking which can @@ -241,7 +223,7 @@ class tensor_core>> "Number of provided indices does not match with tensor order."); } const auto idx = 
ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor[idx + _offset]; + return _data[idx]; } /** @brief Element access using a multi-index with bound checking which can @@ -283,8 +265,8 @@ class tensor_core>> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); - return _tensor[idx]; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); + return _data[idx]; } /** @brief Element access using a single index. @@ -296,9 +278,9 @@ class tensor_core>> [[nodiscard]] inline reference operator[](size_type i) { std::cout << "idx:" << i; - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); std::cout << "->" << idx << std::endl; - return _tensor[idx]; + return _data[idx]; } /** @brief Element access using a single-index with bound checking which can @@ -311,8 +293,8 @@ class tensor_core>> template [[nodiscard]] inline const_reference at(size_type i) const { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); - return _tensor[idx]; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); + return _data[idx]; } /** @brief Read tensor element of a tensor \c t with a single-index \c i @@ -323,8 +305,8 @@ class tensor_core>> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); - return _tensor[idx]; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); + return _data[idx]; } /** @brief Generates a 
tensor_core index for tensor_core contraction @@ -366,11 +348,7 @@ class tensor_core>> "Cannot create subtensor " "Number of provided indices does not match with tensor order."); } - size_type n = size; - auto convert = [&] (auto arg) { - return _spans[--n](arg); - }; - return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } template @@ -382,11 +360,7 @@ class tensor_core>> "Cannot create subtensor " "Number of provided indices does not match with tensor order."); } - size_type n = size; - auto convert = [&] (auto arg) { - return _spans[--n](arg); - }; - return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } @@ -411,8 +385,8 @@ class tensor_core>> [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + _offset; } - [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + _offset; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _data; } + [[nodiscard]] inline auto data () noexcept -> pointer { return _data; } // [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: @@ -424,8 +398,7 @@ class tensor_core>> extents_type _extents; strides_type _strides; strides_type _span_strides; - std::size_t _offset; - tensor_type& _tensor; + pointer _data; }; template diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp 
b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp index 33320060c..c2e91869b 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_engine.hpp @@ -1,4 +1,5 @@ // +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index 1a4b882f5..19f11dbd2 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -1,4 +1,5 @@ // +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com // Copyright (c) 2021, Kannav Mehta, kmkannavkmehta@gmail.com // // Distributed under the Boost Software License, Version 1.0. (See @@ -104,8 +105,7 @@ class tensor_core> , _extents(t.extents()) , _strides(t.strides()) , _span_strides(t.strides()) - , _offset(size_type(0)) - , _tensor(t) + , _data(t.data()) { } @@ -116,27 +116,13 @@ class tensor_core> , _extents{} , _strides{detail::to_span_strides(t.strides(), _spans)} , _span_strides{} - , _offset{detail::to_offset(t.strides(), _spans)} - , _tensor(t) + , _data{t.data() + detail::to_offset(t.strides(), _spans)} { _extents = detail::to_extents(_spans); _span_strides = ublas::to_strides(_extents,layout_type{}); } - // TODO - // template - // tensor_core(const tensor_core& t, FS&& first, SL&&... 
spans) - // : tensor_expression_type{} - // , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) - // , _extents{} - // , _strides{} - // , _span_strides(detail::to_span_strides(t.strides(), _spans)) - // , _offset{detail::to_offset(t.strides(), _spans)} - // , _tensor(t._tensor) - // { - // _extents = detail::to_extents(_spans); - // _strides = ublas::to_strides(_extents,layout_type{}); - // } + tensor_core(tensor_core&& v) : tensor_expression_type{} @@ -144,8 +130,7 @@ class tensor_core> , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) , _span_strides(std::move(v._span_strides)) - , _offset(std::move(v._offset)) - , _tensor(std::move(v._tensor)) + , _data(std::move(v._data)) { _extents = detail::to_extents(_spans); } @@ -200,7 +185,7 @@ class tensor_core> { static_assert (sizeof...(is)+2 == std::tuple_size_v); const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor[idx + _offset]; + return _data[idx]; } /** @brief Element access using a multi-index with bound checking which can @@ -218,7 +203,7 @@ class tensor_core> { static_assert (sizeof...(Is)+2 == std::tuple_size_v); const auto idx = ublas::detail::to_index(_strides, i1, i2, is...); - return _tensor[idx + _offset]; + return _data[idx]; } /** @brief Element access using a multi-index with bound checking which can @@ -260,8 +245,8 @@ class tensor_core> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); - return _tensor[idx]; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); + return _data[idx]; } /** @brief Element access using a single index. 
@@ -273,9 +258,9 @@ class tensor_core> [[nodiscard]] inline reference operator[](size_type i) { std::cout << "idx:" << i; - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); std::cout << "->" << idx << std::endl; - return _tensor[idx]; + return _data[idx]; } /** @brief Element access using a single-index with bound checking which can @@ -288,8 +273,8 @@ class tensor_core> template [[nodiscard]] inline const_reference at(size_type i) const { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); - return _tensor[idx]; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); + return _data[idx]; } /** @brief Read tensor element of a tensor \c t with a single-index \c i @@ -300,8 +285,8 @@ class tensor_core> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _offset); - return _tensor[idx]; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); + return _data[idx]; } /** @brief Generates a tensor_core index for tensor_core contraction @@ -334,11 +319,7 @@ class tensor_core> { constexpr auto size = sizeof...(spans)+1; static_assert(size == std::tuple_size_v); - size_type n = size; - auto convert = [&] (auto arg) { - return _spans[--n](arg); - }; - return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } template @@ -346,11 +327,7 @@ class tensor_core> { constexpr auto size = sizeof...(spans)+1; static_assert(size == std::tuple_size_v); - size_type n = size; - auto convert = [&] 
(auto arg) { - return _spans[--n](arg); - }; - return subtensor_type(_tensor, std::forward(convert(s)), std::forward(convert(spans))...); + return subtensor_type(*this, std::forward(s), std::forward(spans)...); } // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } @@ -375,8 +352,8 @@ class tensor_core> [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } - [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _tensor.data() + _offset;} - [[nodiscard]] inline auto data () noexcept -> pointer { return _tensor.data() + _offset; } + [[nodiscard]] inline auto data () const noexcept -> const_pointer { return _data;} + [[nodiscard]] inline auto data () noexcept -> pointer { return _data; } // [[nodiscard]] inline auto const& base () const noexcept { return _tensor.container(); } private: @@ -387,8 +364,7 @@ class tensor_core> extents_type _extents; strides_type _strides; strides_type _span_strides; - std::size_t _offset; - tensor_type& _tensor; + pointer _data; }; } // namespace boost::numeric::ublas diff --git a/test/tensor/test_expression_evaluation.cpp b/test/tensor/test_expression_evaluation.cpp index 5863aa963..c927d0f7c 100644 --- a/test/tensor/test_expression_evaluation.cpp +++ b/test/tensor/test_expression_evaluation.cpp @@ -11,8 +11,6 @@ // - - #include #include #include @@ -149,7 +147,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_expression_all_extents_equal, valu using value_t = typename value::first_type; using layout_t = typename value::second_type; using tensor_t = ublas::tensor_dynamic; - + auto uplus1 = [](auto const& a){ return a + value_t(1); }; auto uplus2 = [](auto const& a){ return value_t(2) + a; }; auto bplus = std::plus {}; From e0134952d8b3ab6f7a51673ef32a2c8b5f9081f4 Mon Sep 17 
00:00:00 2001 From: Kannav Mehta Date: Mon, 23 Aug 2021 09:48:12 +0530 Subject: [PATCH 35/40] subtensor expression eval --- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 9 + .../tensor/tensor/subtensor_static_rank.hpp | 17 +- test/tensor/Jamfile | 64 +- test/tensor/test_access.cpp | 2 +- .../test_subtensor_expression_evaluation.cpp | 117 ++-- test/tensor/test_subtensor_extents.cpp | 555 ------------------ test/tensor/test_subtensor_matrix_vector.cpp | 186 ------ 7 files changed, 118 insertions(+), 832 deletions(-) delete mode 100644 test/tensor/test_subtensor_extents.cpp diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index c32654416..a7241aca5 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -363,6 +363,15 @@ class tensor_core>> return subtensor_type(*this, std::forward(s), std::forward(spans)...); } + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents ); + std::swap(lhs._strides , rhs._strides ); + std::swap(lhs._span_strides , rhs._span_strides); + std::swap(lhs._spans , rhs._spans); + std::swap(lhs._data , rhs._data); + } + // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } // [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } // [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index 19f11dbd2..9a2c2f875 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -330,6 +330,15 @@ class tensor_core> return subtensor_type(*this, std::forward(s), 
std::forward(spans)...); } + friend void swap(tensor_core& lhs, tensor_core& rhs) + { + std::swap(lhs._extents , rhs._extents ); + std::swap(lhs._strides , rhs._strides ); + std::swap(lhs._span_strides , rhs._span_strides); + std::swap(lhs._spans , rhs._spans); + std::swap(lhs._data , rhs._data); + } + // [[nodiscard]] inline auto begin () const noexcept -> const_iterator { return _container.begin (); } // [[nodiscard]] inline auto end () const noexcept -> const_iterator { return _container.end (); } // [[nodiscard]] inline auto begin () noexcept -> iterator { return _container.begin (); } @@ -361,10 +370,10 @@ class tensor_core> * @brief There might be cases where spans cannot be computed on creation */ std::array _spans; - extents_type _extents; - strides_type _strides; - strides_type _span_strides; - pointer _data; + extents_type _extents; + strides_type _strides; + strides_type _span_strides; + pointer _data; }; } // namespace boost::numeric::ublas diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 99f1ff47f..a2aa4531c 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,47 +32,47 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run test_access.cpp - test_algorithms.cpp - test_einstein_notation.cpp - test_expression.cpp - test_expression_evaluation.cpp - test_extents_dynamic.cpp - test_extents_dynamic_rank_static.cpp - test_extents_functions.cpp + [ run # test_access.cpp + # test_algorithms.cpp + # test_einstein_notation.cpp + # test_expression.cpp + # test_expression_evaluation.cpp + # test_extents_dynamic.cpp + # test_extents_dynamic_rank_static.cpp + # test_extents_functions.cpp test_fixed_rank_expression_evaluation.cpp - test_fixed_rank_extents.cpp - test_fixed_rank_functions.cpp - test_fixed_rank_operators_arithmetic.cpp - test_fixed_rank_operators_comparison.cpp - test_fixed_rank_strides.cpp + # test_fixed_rank_extents.cpp + # test_fixed_rank_functions.cpp + # test_fixed_rank_operators_arithmetic.cpp + # 
test_fixed_rank_operators_comparison.cpp + # test_fixed_rank_strides.cpp test_fixed_rank_tensor.cpp - test_fixed_rank_tensor_matrix_vector.cpp - test_functions.cpp - test_multi_index.cpp - test_multi_index_utility.cpp - test_multiplication.cpp - test_operators_arithmetic.cpp - test_operators_comparison.cpp + # test_fixed_rank_tensor_matrix_vector.cpp + # test_functions.cpp + # test_multi_index.cpp + # test_multi_index_utility.cpp + # test_multiplication.cpp + # test_operators_arithmetic.cpp + # test_operators_comparison.cpp # test_span.cpp - test_static_expression_evaluation.cpp - test_static_extents.cpp - test_static_operators_arithmetic.cpp - test_static_operators_comparison.cpp - test_static_strides.cpp - test_static_tensor.cpp - test_static_tensor_matrix_vector.cpp - test_strides.cpp + # test_static_expression_evaluation.cpp + # test_static_extents.cpp + # test_static_operators_arithmetic.cpp + # test_static_operators_comparison.cpp + # test_static_strides.cpp + # test_static_tensor.cpp + # test_static_tensor_matrix_vector.cpp + # test_strides.cpp # test_subtensor.cpp - # test_subtensor_expression_evaluation.cpp + # test_subtensor_expression_evaluation.cpp # test_subtensor_extents.cpp # test_subtensor_matrix_vector.cpp # test_subtensor_operators_arithmetic.cpp # test_subtensor_operators_comparison.cpp - test_subtensor_utility.cpp + # test_subtensor_utility.cpp test_tensor.cpp - test_tensor_matrix_vector.cpp - unit_test_framework + # test_tensor_matrix_vector.cpp + # unit_test_framework : : : diff --git a/test/tensor/test_access.cpp b/test/tensor/test_access.cpp index dc28a31e1..dd0b08607 100644 --- a/test/tensor/test_access.cpp +++ b/test/tensor/test_access.cpp @@ -301,7 +301,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_compute_single_index_subtensor, layout_t, auto const& jref = index[k]; for(auto kk = 0u; kk < jref.size(); ++kk){ auto const jj = jref[kk]; - auto const j = ub::detail::compute_single_index(jj,w.begin(),w.end(),w.begin(), 0); + auto const j = 
ub::detail::compute_single_index(jj,w.begin(),w.end(),w.begin()); BOOST_CHECK_EQUAL ( j, jj ) ; } } diff --git a/test/tensor/test_subtensor_expression_evaluation.cpp b/test/tensor/test_subtensor_expression_evaluation.cpp index a1e06885b..4b41830dc 100644 --- a/test/tensor/test_subtensor_expression_evaluation.cpp +++ b/test/tensor/test_subtensor_expression_evaluation.cpp @@ -22,7 +22,7 @@ #include #include -BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_expression) +BOOST_AUTO_TEST_SUITE(test_subtensor_static_rank_expression) using test_types = zip>::with_t; @@ -49,7 +49,7 @@ struct fixture }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_extents, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_retrieve_extents, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; using value_t = typename value::first_type; @@ -66,31 +66,31 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_ex static constexpr auto size = std::tuple_size_v>; using tensor_t = ublas::tensor_static_rank; - + using subtensor = typename tensor_t::subtensor_type; auto t = tensor_t(e); auto v = value_t{}; for(auto& tt: t){ tt = v; v+=value_t{1}; } + auto s = subtensor(t); + BOOST_CHECK( ublas::detail::retrieve_extents( s ) == e ); - BOOST_CHECK( ublas::detail::retrieve_extents( t ) == e ); - - // uexpr1 = t+1 - // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + // uexpr1 = s+1 + // uexpr2 = 2+s + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s, uplus2 ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); - // bexpr_uexpr = (t+1) + (2+t) + // bexpr_uexpr = (s+1) + (2+s) auto bexpr_uexpr = 
ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); - // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + // bexpr_bexpr_uexpr = ((s+1) + (2+s)) - s + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s, bminus ); BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); @@ -105,6 +105,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_ex constexpr auto size1 = std::tuple_size_v>; using tensor_type1 = ublas::tensor_static_rank; + using subtensor_type1 = typename tensor_type1::subtensor_type; + for_each_in_tuple(extents, [&,I](auto J, auto const& e2){ @@ -115,6 +117,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_ex static constexpr auto size1 = std::tuple_size_v>; static constexpr auto size2 = std::tuple_size_v>; using tensor_type2 = ublas::tensor_static_rank; + using subtensor_type2 = typename tensor_type2::subtensor_type; auto v = value_t{}; @@ -124,34 +127,38 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_retrieve_ex tensor_type2 t2(e2); for(auto& tt: t2){ tt = v; v+=value_t{2}; } - BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) != ublas::detail::retrieve_extents( t2 ) ); + auto s1 = subtensor_type1(t1); + auto s2 = subtensor_type2(t2); - // uexpr1 = t1+1 - // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); - BOOST_CHECK( ublas::detail::retrieve_extents( t1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( t2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) != ublas::detail::retrieve_extents( s2 ) ); + + // uexpr1 = s1+1 + // 
uexpr2 = 2+s2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); + + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); if constexpr( size1 == size2 ){ - // bexpr_uexpr = (t1+1) + (2+t2) + // bexpr_uexpr = (s1+1) + (2+s2) auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(t1) ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(s1) ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(t2) ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(s2) ); - // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + // bexpr_bexpr_uexpr = s2 - ((s1+1) + (2+s2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(t2) ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(s2) ); } }); @@ -160,11 +167,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( 
test_tensor_static_rank_expression_retrieve_ex - - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents_equal, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_all_extents_equal, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; using value_t = typename value::first_type; @@ -178,32 +181,33 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ static constexpr auto size = std::tuple_size_v>; using tensor_t = ublas::tensor_static_rank; - + using subtensor = typename tensor_t::subtensor_type; auto t = tensor_t(e); auto v = value_t{}; for(auto& tt: t){ tt = v; v+=value_t{1}; } + auto s = subtensor(t); - BOOST_CHECK( ublas::detail::all_extents_equal( t , e ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( s , e ) ); - // uexpr1 = t+1 - // uexpr2 = 2+t - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t, uplus2 ); + // uexpr1 = s+1 + // uexpr2 = 2+s + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s, uplus2 ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, e ) ); - // bexpr_uexpr = (t+1) + (2+t) + // bexpr_uexpr = (s+1) + (2+s) auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); - // bexpr_bexpr_uexpr = ((t+1) + (2+t)) - t - auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t, bminus ); + // bexpr_bexpr_uexpr = ((s+1) + (2+s)) - s + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s, bminus ); BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e 
) ); @@ -218,6 +222,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents static constexpr auto size1 = std::tuple_size_v>; using tensor_type1 = ublas::tensor_static_rank; + using subtensor_type1 = typename tensor_type1::subtensor_type; for_each_in_tuple(extents, [&](auto J, auto& e2){ @@ -228,6 +233,7 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents static constexpr auto size2 = std::tuple_size_v>; using tensor_type2 = ublas::tensor_static_rank; + using subtensor_type2 = typename tensor_type2::subtensor_type; auto v = value_t{}; @@ -237,44 +243,47 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_static_rank_expression_all_extents tensor_type2 t2(e2); for(auto& tt: t2){ tt = v; v+=value_t{2}; } + auto s1 = subtensor_type1(t1); + auto s2 = subtensor_type2(t2); + BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); - // uexpr1 = t1+1 - // uexpr2 = 2+t2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( t1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( t2, uplus2 ); + // uexpr1 = s1+1 + // uexpr2 = 2+s2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); if constexpr( size1 == size2 ){ - // bexpr_uexpr = (t1+1) + (2+t2) + // bexpr_uexpr = (t1+1) + (2+s2) auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+t2)) - t2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, t2, bminus ); + // bexpr_bexpr_uexpr = ((t1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); - // bexpr_bexpr_uexpr = t2 - ((t1+1) + (2+t2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( t2, bexpr_uexpr, bminus ); + // bexpr_bexpr_uexpr = s2 - ((t1+1) + (2+s2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = (t1+1) + t2 - auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, t2, bplus ); + // bexpr_uexpr2 = (t1+1) + s2 + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, s2, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = ((t1+1) + t2) + t1 + // bexpr_uexpr2 = ((t1+1) + s2) + t1 auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); - // bexpr_uexpr2 = t1 + (((t1+1) + t2) + t1) + // bexpr_uexpr2 = t1 + (((t1+1) + s2) + t1) auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); } diff --git a/test/tensor/test_subtensor_extents.cpp b/test/tensor/test_subtensor_extents.cpp deleted file mode 100644 index ac873f55c..000000000 --- a/test/tensor/test_subtensor_extents.cpp +++ /dev/null @@ -1,555 +0,0 @@ -// -// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - -#include -#include -#include - -BOOST_AUTO_TEST_SUITE ( test_extents_static_size ) - - -//*boost::unit_test::label("extents") -//*boost::unit_test::label("constructor") - -BOOST_AUTO_TEST_CASE(test_extents_static_size_ctor) -{ - namespace ub = boost::numeric::ublas; - - -// auto e = ub::extents<0>{}; - auto e11 = ub::extents<2>{1,1}; - auto e12 = ub::extents<2>{1,2}; - auto e21 = ub::extents<2>{2,1}; - auto e23 = ub::extents<2>{2,3}; - auto e231 = ub::extents<3>{2,3,1}; - auto e123 = ub::extents<3>{1,2,3}; // 6 - auto e423 = ub::extents<3>{4,2,3}; // 7 - - - BOOST_CHECK (!ub::empty(e11)); - BOOST_CHECK (!ub::empty(e12)); - BOOST_CHECK (!ub::empty(e21)); - BOOST_CHECK (!ub::empty(e23)); - BOOST_CHECK (!ub::empty(e231)); - BOOST_CHECK (!ub::empty(e123)); - BOOST_CHECK (!ub::empty(e423)); - - BOOST_CHECK ( ub::size (e11) == 2); - BOOST_CHECK ( ub::size (e12) == 2); - BOOST_CHECK ( ub::size (e21) == 2); - BOOST_CHECK ( ub::size (e23) == 2); - BOOST_CHECK ( ub::size(e231) == 3); - BOOST_CHECK ( ub::size(e123) == 3); - BOOST_CHECK ( ub::size(e423) == 3); - - - BOOST_CHECK_THROW( ub::extents<2>({1,0}), std::invalid_argument); - BOOST_CHECK_THROW( ub::extents<1>({0} ), std::invalid_argument); - BOOST_CHECK_THROW( ub::extents<2>({0,1}), 
std::invalid_argument); - BOOST_CHECK_THROW( ub::extents<2>({1,1,2}), std::length_error); -} - - -struct fixture { - template - using extents = boost::numeric::ublas::extents; - -// extents<0> de {}; - - extents<2> de11 {1,1}; - extents<2> de12 {1,2}; - extents<2> de21 {2,1}; - - extents<2> de23 {2,3}; - extents<3> de231 {2,3,1}; - extents<3> de123 {1,2,3}; - extents<4> de1123 {1,1,2,3}; - extents<5> de12311 {1,2,3,1,1}; - - extents<3> de423 {4,2,3}; - extents<4> de4213 {4,2,1,3}; - extents<5> de42131 {4,2,1,3,1}; - extents<6> de142131 {1,4,2,1,3,1}; - - extents<3> de141 {1,4,1}; - extents<4> de1111 {1,1,1,1}; - extents<5> de14111 {1,4,1,1,1}; - extents<6> de112111 {1,1,2,1,1,1}; - extents<6> de112311 {1,1,2,3,1,1}; -}; - -BOOST_FIXTURE_TEST_CASE(test_extents_static_size_access, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("access")) -{ - - namespace ublas = boost::numeric::ublas; - -// BOOST_REQUIRE_EQUAL(ublas::size(de), 0); -// BOOST_CHECK (ublas::empty(de) ); - - BOOST_REQUIRE_EQUAL(ublas::size(de11) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(de12) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(de21) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(de23) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(de231) , 3); - BOOST_REQUIRE_EQUAL(ublas::size(de123) , 3); - BOOST_REQUIRE_EQUAL(ublas::size(de1123) , 4); - BOOST_REQUIRE_EQUAL(ublas::size(de12311) , 5); - BOOST_REQUIRE_EQUAL(ublas::size(de423) , 3); - BOOST_REQUIRE_EQUAL(ublas::size(de4213) , 4); - BOOST_REQUIRE_EQUAL(ublas::size(de42131) , 5); - BOOST_REQUIRE_EQUAL(ublas::size(de142131), 6); - BOOST_REQUIRE_EQUAL(ublas::size(de141) , 3); - BOOST_REQUIRE_EQUAL(ublas::size(de1111) , 4); - BOOST_REQUIRE_EQUAL(ublas::size(de14111) , 5); - BOOST_REQUIRE_EQUAL(ublas::size(de112111), 6); - BOOST_REQUIRE_EQUAL(ublas::size(de112311), 6); - - - BOOST_CHECK_EQUAL(de11[0],1); - BOOST_CHECK_EQUAL(de11[1],1); - - BOOST_CHECK_EQUAL(de12[0],1); - BOOST_CHECK_EQUAL(de12[1],2); - - BOOST_CHECK_EQUAL(de21[0],2); 
- BOOST_CHECK_EQUAL(de21[1],1); - - BOOST_CHECK_EQUAL(de23[0],2); - BOOST_CHECK_EQUAL(de23[1],3); - - BOOST_CHECK_EQUAL(de231[0],2); - BOOST_CHECK_EQUAL(de231[1],3); - BOOST_CHECK_EQUAL(de231[2],1); - - BOOST_CHECK_EQUAL(de123[0],1); - BOOST_CHECK_EQUAL(de123[1],2); - BOOST_CHECK_EQUAL(de123[2],3); - - BOOST_CHECK_EQUAL(de1123[0],1); - BOOST_CHECK_EQUAL(de1123[1],1); - BOOST_CHECK_EQUAL(de1123[2],2); - BOOST_CHECK_EQUAL(de1123[3],3); - - BOOST_CHECK_EQUAL(de12311[0],1); - BOOST_CHECK_EQUAL(de12311[1],2); - BOOST_CHECK_EQUAL(de12311[2],3); - BOOST_CHECK_EQUAL(de12311[3],1); - BOOST_CHECK_EQUAL(de12311[4],1); - - BOOST_CHECK_EQUAL(de423[0],4); - BOOST_CHECK_EQUAL(de423[1],2); - BOOST_CHECK_EQUAL(de423[2],3); - - BOOST_CHECK_EQUAL(de4213[0],4); - BOOST_CHECK_EQUAL(de4213[1],2); - BOOST_CHECK_EQUAL(de4213[2],1); - BOOST_CHECK_EQUAL(de4213[3],3); - - BOOST_CHECK_EQUAL(de42131[0],4); - BOOST_CHECK_EQUAL(de42131[1],2); - BOOST_CHECK_EQUAL(de42131[2],1); - BOOST_CHECK_EQUAL(de42131[3],3); - BOOST_CHECK_EQUAL(de42131[4],1); - - BOOST_CHECK_EQUAL(de142131[0],1); - BOOST_CHECK_EQUAL(de142131[1],4); - BOOST_CHECK_EQUAL(de142131[2],2); - BOOST_CHECK_EQUAL(de142131[3],1); - BOOST_CHECK_EQUAL(de142131[4],3); - BOOST_CHECK_EQUAL(de142131[5],1); - - BOOST_CHECK_EQUAL(de141[0],1); - BOOST_CHECK_EQUAL(de141[1],4); - BOOST_CHECK_EQUAL(de141[2],1); - - BOOST_CHECK_EQUAL(de1111[0],1); - BOOST_CHECK_EQUAL(de1111[1],1); - BOOST_CHECK_EQUAL(de1111[2],1); - BOOST_CHECK_EQUAL(de1111[3],1); - - BOOST_CHECK_EQUAL(de14111[0],1); - BOOST_CHECK_EQUAL(de14111[1],4); - BOOST_CHECK_EQUAL(de14111[2],1); - BOOST_CHECK_EQUAL(de14111[3],1); - BOOST_CHECK_EQUAL(de14111[4],1); - - BOOST_CHECK_EQUAL(de112111[0],1); - BOOST_CHECK_EQUAL(de112111[1],1); - BOOST_CHECK_EQUAL(de112111[2],2); - BOOST_CHECK_EQUAL(de112111[3],1); - BOOST_CHECK_EQUAL(de112111[4],1); - BOOST_CHECK_EQUAL(de112111[5],1); - - BOOST_CHECK_EQUAL(de112311[0],1); - BOOST_CHECK_EQUAL(de112311[1],1); - BOOST_CHECK_EQUAL(de112311[2],2); - 
BOOST_CHECK_EQUAL(de112311[3],3); - BOOST_CHECK_EQUAL(de112311[4],1); - BOOST_CHECK_EQUAL(de112311[5],1); -} - -BOOST_FIXTURE_TEST_CASE(test_extents_static_size_copy_ctor, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("copy_ctor")) -{ - namespace ublas = boost::numeric::ublas; - -// auto e = de; - auto e1 = de11; - auto e12 = de12; - auto e21 = de21; - auto e23 = de23; - auto e231 = de231; - auto e123 = de123; - auto e1123 = de1123; - auto e12311 = de12311; - auto e423 = de423; - auto e4213 = de4213; - auto e42131 = de42131; - auto e142131 = de142131; - auto e141 = de141; - auto e1111 = de1111; - auto e14111 = de14111; - auto e112111 = de112111; - auto e112311 = de112311; - - -// BOOST_CHECK (ublas::empty(e) ); - -// BOOST_REQUIRE_EQUAL(ublas::size(e) , 0); - BOOST_REQUIRE_EQUAL(ublas::size(e1) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(e12) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(e21) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(e23) , 2); - BOOST_REQUIRE_EQUAL(ublas::size(e231), 3); - BOOST_REQUIRE_EQUAL(ublas::size(e123), 3); - BOOST_REQUIRE_EQUAL(ublas::size(e1123), 4); - BOOST_REQUIRE_EQUAL(ublas::size(e12311), 5); - BOOST_REQUIRE_EQUAL(ublas::size(e423), 3); - BOOST_REQUIRE_EQUAL(ublas::size(e4213), 4); - BOOST_REQUIRE_EQUAL(ublas::size(e42131), 5); - BOOST_REQUIRE_EQUAL(ublas::size(e142131), 6); - BOOST_REQUIRE_EQUAL(ublas::size(e141), 3); - BOOST_REQUIRE_EQUAL(ublas::size(e1111), 4); - BOOST_REQUIRE_EQUAL(ublas::size(e14111), 5); - BOOST_REQUIRE_EQUAL(ublas::size(e112111), 6); - BOOST_REQUIRE_EQUAL(ublas::size(e112311), 6); - - - BOOST_CHECK_EQUAL(e1[0],1); - BOOST_CHECK_EQUAL(e1[1],1); - - BOOST_CHECK_EQUAL(e12[0],1); - BOOST_CHECK_EQUAL(e12[1],2); - - BOOST_CHECK_EQUAL(e21[0],2); - BOOST_CHECK_EQUAL(e21[1],1); - - BOOST_CHECK_EQUAL(e23[0],2); - BOOST_CHECK_EQUAL(e23[1],3); - - BOOST_CHECK_EQUAL(e231[0],2); - BOOST_CHECK_EQUAL(e231[1],3); - BOOST_CHECK_EQUAL(e231[2],1); - - BOOST_CHECK_EQUAL(e123[0],1); - 
BOOST_CHECK_EQUAL(e123[1],2); - BOOST_CHECK_EQUAL(e123[2],3); - - BOOST_CHECK_EQUAL(e1123[0],1); - BOOST_CHECK_EQUAL(e1123[1],1); - BOOST_CHECK_EQUAL(e1123[2],2); - BOOST_CHECK_EQUAL(e1123[3],3); - - BOOST_CHECK_EQUAL(e12311[0],1); - BOOST_CHECK_EQUAL(e12311[1],2); - BOOST_CHECK_EQUAL(e12311[2],3); - BOOST_CHECK_EQUAL(e12311[3],1); - BOOST_CHECK_EQUAL(e12311[4],1); - - BOOST_CHECK_EQUAL(e423[0],4); - BOOST_CHECK_EQUAL(e423[1],2); - BOOST_CHECK_EQUAL(e423[2],3); - - BOOST_CHECK_EQUAL(e4213[0],4); - BOOST_CHECK_EQUAL(e4213[1],2); - BOOST_CHECK_EQUAL(e4213[2],1); - BOOST_CHECK_EQUAL(e4213[3],3); - - BOOST_CHECK_EQUAL(e42131[0],4); - BOOST_CHECK_EQUAL(e42131[1],2); - BOOST_CHECK_EQUAL(e42131[2],1); - BOOST_CHECK_EQUAL(e42131[3],3); - BOOST_CHECK_EQUAL(e42131[4],1); - - BOOST_CHECK_EQUAL(e142131[0],1); - BOOST_CHECK_EQUAL(e142131[1],4); - BOOST_CHECK_EQUAL(e142131[2],2); - BOOST_CHECK_EQUAL(e142131[3],1); - BOOST_CHECK_EQUAL(e142131[4],3); - BOOST_CHECK_EQUAL(e142131[5],1); - - BOOST_CHECK_EQUAL(e141[0],1); - BOOST_CHECK_EQUAL(e141[1],4); - BOOST_CHECK_EQUAL(e141[2],1); - - BOOST_CHECK_EQUAL(e1111[0],1); - BOOST_CHECK_EQUAL(e1111[1],1); - BOOST_CHECK_EQUAL(e1111[2],1); - BOOST_CHECK_EQUAL(e1111[3],1); - - BOOST_CHECK_EQUAL(e14111[0],1); - BOOST_CHECK_EQUAL(e14111[1],4); - BOOST_CHECK_EQUAL(e14111[2],1); - BOOST_CHECK_EQUAL(e14111[3],1); - BOOST_CHECK_EQUAL(e14111[4],1); - - BOOST_CHECK_EQUAL(e112111[0],1); - BOOST_CHECK_EQUAL(e112111[1],1); - BOOST_CHECK_EQUAL(e112111[2],2); - BOOST_CHECK_EQUAL(e112111[3],1); - BOOST_CHECK_EQUAL(e112111[4],1); - BOOST_CHECK_EQUAL(e112111[5],1); - - BOOST_CHECK_EQUAL(e112311[0],1); - BOOST_CHECK_EQUAL(e112311[1],1); - BOOST_CHECK_EQUAL(e112311[2],2); - BOOST_CHECK_EQUAL(e112311[3],3); - BOOST_CHECK_EQUAL(e112311[4],1); - BOOST_CHECK_EQUAL(e112311[5],1); - -} - -BOOST_FIXTURE_TEST_CASE(test_extents_static_size_is, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("query")) -{ - namespace ublas = 
boost::numeric::ublas; - - -// auto e = de; - auto e11 = de11; - auto e12 = de12; - auto e21 = de21; - auto e23 = de23; - auto e231 = de231; - auto e123 = de123; - auto e1123 = de1123; - auto e12311 = de12311; - auto e423 = de423; - auto e4213 = de4213; - auto e42131 = de42131; - auto e142131 = de142131; - auto e141 = de141; - auto e1111 = de1111; - auto e14111 = de14111; - auto e112111 = de112111; - auto e112311 = de112311; - -// BOOST_CHECK( ublas::empty (e)); -// BOOST_CHECK( ! ublas::is_scalar(e)); -// BOOST_CHECK( ! ublas::is_vector(e)); -// BOOST_CHECK( ! ublas::is_matrix(e)); -// BOOST_CHECK( ! ublas::is_tensor(e)); - - BOOST_CHECK( ! ublas::empty (e11) ); - BOOST_CHECK( ublas::is_scalar(e11) ); - BOOST_CHECK( ublas::is_vector(e11) ); - BOOST_CHECK( ublas::is_matrix(e11) ); - BOOST_CHECK( ! ublas::is_tensor(e11) ); - - BOOST_CHECK( ! ublas::empty (e12) ); - BOOST_CHECK( ! ublas::is_scalar(e12) ); - BOOST_CHECK( ublas::is_vector(e12) ); - BOOST_CHECK( ublas::is_matrix(e12) ); - BOOST_CHECK( ! ublas::is_tensor(e12) ); - - BOOST_CHECK( ! ublas::empty (e21) ); - BOOST_CHECK( ! ublas::is_scalar(e21) ); - BOOST_CHECK( ublas::is_vector(e21) ); - BOOST_CHECK( ublas::is_matrix(e21) ); - BOOST_CHECK( ! ublas::is_tensor(e21) ); - - BOOST_CHECK( ! ublas::empty (e23) ); - BOOST_CHECK( ! ublas::is_scalar(e23) ); - BOOST_CHECK( ! ublas::is_vector(e23) ); - BOOST_CHECK( ublas::is_matrix(e23) ); - BOOST_CHECK( ! ublas::is_tensor(e23) ); - - BOOST_CHECK( ! ublas::empty (e231) ); - BOOST_CHECK( ! ublas::is_scalar(e231) ); - BOOST_CHECK( ! ublas::is_vector(e231) ); - BOOST_CHECK( ublas::is_matrix(e231) ); - BOOST_CHECK( ! ublas::is_tensor(e231) ); - - BOOST_CHECK( ! ublas::empty (e123) ); - BOOST_CHECK( ! ublas::is_scalar(e123) ); - BOOST_CHECK( ! ublas::is_vector(e123) ); - BOOST_CHECK( ! ublas::is_matrix(e123) ); - BOOST_CHECK( ublas::is_tensor(e123) ); - - BOOST_CHECK( ! ublas::empty (e1123) ); - BOOST_CHECK( ! ublas::is_scalar(e1123) ); - BOOST_CHECK( ! 
ublas::is_vector(e1123) ); - BOOST_CHECK( ! ublas::is_matrix(e1123) ); - BOOST_CHECK( ublas::is_tensor(e1123) ); - - BOOST_CHECK( ! ublas::empty (e12311) ); - BOOST_CHECK( ! ublas::is_scalar(e12311) ); - BOOST_CHECK( ! ublas::is_vector(e12311) ); - BOOST_CHECK( ! ublas::is_matrix(e12311) ); - BOOST_CHECK( ublas::is_tensor(e12311) ); - - BOOST_CHECK( ! ublas::empty (e423) ); - BOOST_CHECK( ! ublas::is_scalar(e423) ); - BOOST_CHECK( ! ublas::is_vector(e423) ); - BOOST_CHECK( ! ublas::is_matrix(e423) ); - BOOST_CHECK( ublas::is_tensor(e423) ); - - BOOST_CHECK( ! ublas::empty (e4213) ); - BOOST_CHECK( ! ublas::is_scalar(e4213) ); - BOOST_CHECK( ! ublas::is_vector(e4213) ); - BOOST_CHECK( ! ublas::is_matrix(e4213) ); - BOOST_CHECK( ublas::is_tensor(e4213) ); - - BOOST_CHECK( ! ublas::empty (e42131) ); - BOOST_CHECK( ! ublas::is_scalar(e42131) ); - BOOST_CHECK( ! ublas::is_vector(e42131) ); - BOOST_CHECK( ! ublas::is_matrix(e42131) ); - BOOST_CHECK( ublas::is_tensor(e42131) ); - - BOOST_CHECK( ! ublas::empty (e142131) ); - BOOST_CHECK( ! ublas::is_scalar(e142131) ); - BOOST_CHECK( ! ublas::is_vector(e142131) ); - BOOST_CHECK( ! ublas::is_matrix(e142131) ); - BOOST_CHECK( ublas::is_tensor(e142131) ); - - BOOST_CHECK( ! ublas::empty (e141) ); - BOOST_CHECK( ! ublas::is_scalar(e141) ); - BOOST_CHECK( ublas::is_vector(e141) ); - BOOST_CHECK( ublas::is_matrix(e141) ); - BOOST_CHECK( ! ublas::is_tensor(e141) ); - - BOOST_CHECK( ! ublas::empty (e1111) ); - BOOST_CHECK( ublas::is_scalar(e1111) ); - BOOST_CHECK( ublas::is_vector(e1111) ); - BOOST_CHECK( ublas::is_matrix(e1111) ); - BOOST_CHECK( ! ublas::is_tensor(e1111) ); - - BOOST_CHECK( ! ublas::empty (e14111) ); - BOOST_CHECK( ! ublas::is_scalar(e14111) ); - BOOST_CHECK( ublas::is_vector(e14111) ); - BOOST_CHECK( ublas::is_matrix(e14111) ); - BOOST_CHECK( ! ublas::is_tensor(e14111) ); - - BOOST_CHECK( ! ublas::empty (e112111) ); - BOOST_CHECK( ! ublas::is_scalar(e112111) ); - BOOST_CHECK( ! 
ublas::is_vector(e112111) ); - BOOST_CHECK( ! ublas::is_matrix(e112111) ); - BOOST_CHECK( ublas::is_tensor(e112111) ); - - BOOST_CHECK( ! ublas::empty (e112311) ); - BOOST_CHECK( ! ublas::is_scalar(e112311) ); - BOOST_CHECK( ! ublas::is_vector(e112311) ); - BOOST_CHECK( ! ublas::is_matrix(e112311) ); - BOOST_CHECK( ublas::is_tensor(e112311) ); -} - -//BOOST_FIXTURE_TEST_CASE(test_extents_static_size_squeeze, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("squeeze")) -//{ -// auto e1 = squeeze(de1); // {1,1} -// auto e2 = squeeze(de2); // {1,2} -// auto 21 = squeeze(d21); // {2,1} - -// auto e4 = squeeze(de4); // {2,3} -// auto e231 = squeeze(de231); // {2,3} -// auto e123 = squeeze(de123); // {2,3} -// auto e1123 = squeeze(de1123); // {2,3} -// auto e12311 = squeeze(de12311); // {2,3} - -// auto e423 = squeeze(de423); // {4,2,3} -// auto e4213 = squeeze(de4213); // {4,2,3} -// auto e11 = squeeze(de11); // {4,2,3} -// auto e12 = squeeze(e142131); // {4,2,3} - -// auto e141 = squeeze(de141); // {1,4} -// auto e1111 = squeeze(de1111); // {1,1} -// auto e14111 = squeeze(de14111); // {1,4} -// auto e112111 = squeeze(de112111); // {2,1} -// auto e112311 = squeeze(de112311); // {2,3} - -// BOOST_CHECK( (e1 == extents<2>{1,1}) ); -// BOOST_CHECK( (e2 == extents<2>{1,2}) ); -// BOOST_CHECK( (21 == extents<2>{2,1}) ); - -// BOOST_CHECK( (e4 == extents<2>{2,3}) ); -// BOOST_CHECK( (e231 == extents<2>{2,3}) ); -// BOOST_CHECK( (e123 == extents<2>{2,3}) ); -// BOOST_CHECK( (e1123 == extents<2>{2,3}) ); -// BOOST_CHECK( (e12311 == extents<2>{2,3}) ); - -// BOOST_CHECK( (e423 == extents<3>{4,2,3}) ); -// BOOST_CHECK( (e4213 == extents<3>{4,2,3}) ); -// BOOST_CHECK( (e11 == extents<3>{4,2,3}) ); -// BOOST_CHECK( (e12 == extents<3>{4,2,3}) ); - -// BOOST_CHECK( (e141 == extents<2>{1,4}) ); -// BOOST_CHECK( (e1111 == extents<2>{1,1}) ); -// BOOST_CHECK( (e14111 == extents<2>{1,4}) ); -// BOOST_CHECK( (e112111 == extents<2>{2,1}) ); -// 
BOOST_CHECK( (e112311 == extents<2>{2,3}) ); - -//} - - -BOOST_FIXTURE_TEST_CASE(test_extents_static_size_product, fixture, *boost::unit_test::label("basic_fixed_rank_extents") *boost::unit_test::label("product")) -{ - namespace ublas = boost::numeric::ublas; - -// auto e = ublas::product( de ); - auto e11 = ublas::product( de11 ); - auto e12 = ublas::product( de12 ); - auto e21 = ublas::product( de21 ); - auto e23 = ublas::product( de23 ); - auto e231 = ublas::product( de231 ); - auto e123 = ublas::product( de123 ); - auto e1123 = ublas::product( de1123 ); - auto e12311 = ublas::product( de12311 ); - auto e423 = ublas::product( de423 ); - auto e4213 = ublas::product( de4213 ); - auto e42131 = ublas::product( de42131 ); - auto e142131 = ublas::product( de142131 ); - auto e141 = ublas::product( de141 ); - auto e1111 = ublas::product( de1111 ); - auto e14111 = ublas::product( de14111 ); - auto e112111 = ublas::product( de112111 ); - auto e112311 = ublas::product( de112311 ); - -// BOOST_CHECK_EQUAL( e , 0 ); - BOOST_CHECK_EQUAL( e11 , 1 ); - BOOST_CHECK_EQUAL( e12 , 2 ); - BOOST_CHECK_EQUAL( e21 , 2 ); - BOOST_CHECK_EQUAL( e23 , 6 ); - BOOST_CHECK_EQUAL( e231 , 6 ); - BOOST_CHECK_EQUAL( e123 , 6 ); - BOOST_CHECK_EQUAL( e1123 , 6 ); - BOOST_CHECK_EQUAL( e12311 , 6 ); - BOOST_CHECK_EQUAL( e423 , 24 ); - BOOST_CHECK_EQUAL( e4213 , 24 ); - BOOST_CHECK_EQUAL( e42131 , 24 ); - BOOST_CHECK_EQUAL( e142131, 24 ); - BOOST_CHECK_EQUAL( e141 , 4 ); - BOOST_CHECK_EQUAL( e1111 , 1 ); - BOOST_CHECK_EQUAL( e14111 , 4 ); - BOOST_CHECK_EQUAL( e112111, 2 ); - BOOST_CHECK_EQUAL( e112311, 6 ); - - -} - -BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_matrix_vector.cpp b/test/tensor/test_subtensor_matrix_vector.cpp index e988d9f75..7baf235c3 100644 --- a/test/tensor/test_subtensor_matrix_vector.cpp +++ b/test/tensor/test_subtensor_matrix_vector.cpp @@ -24,65 +24,6 @@ BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank_matrix_interoperability ) using test_types = 
zip::with_t; -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor, value, test_types) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout = typename value::second_type; - using tensor = ublas::tensor_static_rank; - using matrix = typename tensor::matrix_type; - - auto a2 = tensor( matrix(1,1) ); - BOOST_CHECK_EQUAL( a2.size() , 1 ); - BOOST_CHECK( !a2.empty() ); - BOOST_CHECK_NE( a2.data() , nullptr); - - auto a3 = tensor( matrix(2,1) ); - BOOST_CHECK_EQUAL( a3.size() , 2 ); - BOOST_CHECK( !a3.empty() ); - BOOST_CHECK_NE( a3.data() , nullptr); - - auto a4 = tensor( matrix(1,2) ); - BOOST_CHECK_EQUAL( a4.size() , 2 ); - BOOST_CHECK( !a4.empty() ); - BOOST_CHECK_NE( a4.data() , nullptr); - - auto a5 = tensor( matrix(2,3) ); - BOOST_CHECK_EQUAL( a5.size() , 6 ); - BOOST_CHECK( !a5.empty() ); - BOOST_CHECK_NE( a5.data() , nullptr); -} - - -BOOST_AUTO_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor, value, test_types) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor_static_rank; - using vector_type = typename tensor_type::vector_type; - - auto a2 = tensor_type( vector_type(1) ); - BOOST_CHECK_EQUAL( a2.size() , 1 ); - BOOST_CHECK( !a2.empty() ); - BOOST_CHECK_NE( a2.data() , nullptr); - - auto a3 = tensor_type( vector_type(2) ); - BOOST_CHECK_EQUAL( a3.size() , 2 ); - BOOST_CHECK( !a3.empty() ); - BOOST_CHECK_NE( a3.data() , nullptr); - - auto a4 = tensor_type( vector_type(2) ); - BOOST_CHECK_EQUAL( a4.size() , 2 ); - BOOST_CHECK( !a4.empty() ); - BOOST_CHECK_NE( a4.data() , nullptr); - - auto a5 = tensor_type( vector_type(3) ); - BOOST_CHECK_EQUAL( a5.size() , 3 ); - BOOST_CHECK( !a5.empty() ); - BOOST_CHECK_NE( a5.data() , nullptr); -} - struct fixture { @@ -106,58 +47,6 @@ struct fixture - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_ctor_extents, value, 
test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor = ublas::tensor_static_rank; - using matrix = typename tensor::matrix_type; - - assert(ublas::size(e)==2); - tensor t = matrix{e[0],e[1]}; - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - }; - - for_each_in_tuple(extents,check); -} - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_ctor_extents, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor = ublas::tensor_static_rank; - using vector = typename tensor::vector_type; - - assert(ublas::size(e)==2); - if(ublas::empty(e)) - return; - - tensor t = vector (product(e)); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - }; - - for_each_in_tuple(extents,check); -} - - - BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) { namespace ublas = boost::numeric::ublas; @@ -227,81 +116,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, te for_each_in_tuple(extents,check); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_move_assignment, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - - auto check = [](auto const& /*unused*/, auto& e) { - 
constexpr auto size = std::tuple_size_v>; - using tensor_type = ublas::tensor_static_rank; - using matrix_type = typename tensor_type::matrix_type; - - assert(ublas::size(e) == 2); - auto t = tensor_type{e[1],e[0]}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - auto q = r; - t = std::move(r); - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), q(i,j) ); - } - } - }; - - for_each_in_tuple(extents,check); -} - - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_move_assignment, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_type = ublas::tensor_static_rank; - using vector_type = typename tensor_type::vector_type; - - assert(ublas::size(e) == 2); - auto t = tensor_type{e[1],e[0]}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - auto q = r; - t = std::move(r); - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) * e.at(1)); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t[i], q(i) ); - } - }; - - for_each_in_tuple(extents,check); -} - - - - BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) { From 
7ae89d3fc363aac1b93986f489b9ba071ef2684a Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Mon, 23 Aug 2021 13:17:09 +0530 Subject: [PATCH 36/40] Add tests for subtensor --- examples/tensor/Jamfile | 14 +-- examples/tensor/access_subtensor.cpp | 33 +++-- .../ublas/tensor/expression_evaluation.hpp | 34 ++++-- .../ublas/tensor/subtensor_utility.hpp | 35 +----- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 41 +++---- .../tensor/tensor/subtensor_static_rank.hpp | 34 ++++-- test/tensor/Jamfile | 75 ++++++------ .../test_subtensor_expression_evaluation.cpp | 5 +- .../test_subtensor_operators_arithmetic.cpp | 114 ++++++++++-------- .../test_subtensor_operators_comparison.cpp | 58 +++++---- 10 files changed, 239 insertions(+), 204 deletions(-) diff --git a/examples/tensor/Jamfile b/examples/tensor/Jamfile index 5486e4fbd..d8dcfcfff 100644 --- a/examples/tensor/Jamfile +++ b/examples/tensor/Jamfile @@ -19,11 +19,11 @@ project boost-ublas-tensor-example [ requires cxx17_if_constexpr ] ; -# exe access_tensor : access_tensor.cpp ; -# exe simple_expressions : simple_expressions.cpp ; -# exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; -# exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; -# exe instantiate_tensor : instantiate_tensor.cpp ; -# exe expressions_subtensor : expressions_subtensor.cpp ; -# exe instantiate_subtensor : instantiate_subtensor.cpp ; +exe access_tensor : access_tensor.cpp ; +exe simple_expressions : simple_expressions.cpp ; +exe multiply_tensors_product_function : multiply_tensors_product_function.cpp ; +exe multiply_tensors_einstein_notation : multiply_tensors_einstein_notation.cpp ; +exe instantiate_tensor : instantiate_tensor.cpp ; +exe expressions_subtensor : expressions_subtensor.cpp ; +exe instantiate_subtensor : instantiate_subtensor.cpp ; exe access_subtensor : access_subtensor.cpp ; diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index 
6a90f14bd..8f1861678 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -26,6 +26,9 @@ int main() using tensor = ublas::tensor_dynamic; using span = ublas::span<>; using subtensor = typename tensor::subtensor_type; + auto uplus1 = [](auto const& a){return a + value(1); }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; constexpr auto ones = ublas::ones{}; @@ -43,23 +46,39 @@ int main() } } } - auto A = subtensor(t1, span(1,1,2), span(0,2,2), span()); - auto B = subtensor(A, span(), span(), span(1)); + auto A = t1 (span(1,1,2), span(0,2,2), span()); + + auto B = subtensor(A); std::cout << "% --------------------------- " << std::endl; - for (auto x: A.extents()) { + auto uexpr1 = ublas::detail::make_unary_tensor_expression( B, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( A, uplus1 ); + for (auto& x: uexpr1.e.extents()) { std::cout << x << " "; } + std::cout << std::endl; + for (auto& x: uexpr2.e.extents()) { + std::cout << x << " "; + } + std::cout << std::endl; std::cout << "% --------------------------- " << std::endl; - for (auto x: B.extents()) { + + // bexpr_uexpr = (s1+1) + (2+s2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, B, bminus ); + + auto ext = ublas::detail::retrieve_extents(bexpr_bexpr_uexpr1); + for (auto& x: ext) { std::cout << x << " "; } std::cout << std::endl; - tensor t2 = ones(2,2,1); - auto t3 = ublas::inner_prod(B, t2); + tensor t2 = ones(2,2,2) + A + B; + auto t3 = ublas::inner_prod(B, t2); // // // formatted output - // std::cout << "% --------------------------- " << std::endl << std::endl; + std::cout << "% --------------------------- " << std::endl << std::endl; std::cout << "t1=" << t1 << ";" << std::endl << std::endl; std::cout << "B=" << B << ";" << std::endl << std::endl; 
std::cout << "t2=" << t2 << ";" << std::endl << std::endl; diff --git a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp index 1e3b18569..c9e746db5 100644 --- a/include/boost/numeric/ublas/tensor/expression_evaluation.hpp +++ b/include/boost/numeric/ublas/tensor/expression_evaluation.hpp @@ -45,6 +45,18 @@ struct unary_tensor_expression; namespace boost::numeric::ublas::detail { +template +struct is_compat +{ static constexpr bool value = false; }; + +template +struct is_compat +{ static constexpr bool value = true; }; + +template +struct is_compat +{ static constexpr bool value = true; }; + template struct has_tensor_types { static constexpr bool value = false; }; @@ -55,15 +67,15 @@ struct has_tensor_types template struct has_tensor_types> -{ static constexpr bool value = std::is_same::value || has_tensor_types::value; }; +{ static constexpr bool value = is_compat::value || has_tensor_types::value; }; template struct has_tensor_types> -{ static constexpr bool value = std::is_same::value || std::is_same::value || has_tensor_types::value || has_tensor_types::value; }; +{ static constexpr bool value = is_compat::value || is_compat::value || has_tensor_types::value || has_tensor_types::value; }; template struct has_tensor_types> -{ static constexpr bool value = std::is_same::value || has_tensor_types::value; }; +{ static constexpr bool value = is_compat::value || has_tensor_types::value; }; } // namespace boost::numeric::ublas::detail @@ -96,7 +108,7 @@ constexpr auto& retrieve_extents(tensor_expression const& expr) auto const& cast_expr = static_cast(expr); - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) return cast_expr.extents(); else return retrieve_extents(cast_expr); @@ -120,10 +132,10 @@ constexpr auto& retrieve_extents(binary_tensor_expression const& exp static_assert(detail::has_tensor_types>::value, "Error in 
boost::numeric::ublas::detail::retrieve_extents: Expression to evaluate should contain tensors."); - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) return expr.el.extents(); - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) return expr.er.extents(); else if constexpr ( detail::has_tensor_types::value ) @@ -151,7 +163,7 @@ constexpr auto& retrieve_extents(unary_tensor_expression const& expr) static_assert(detail::has_tensor_types>::value, "Error in boost::numeric::ublas::detail::retrieve_extents: Expression to evaluate should contain tensors."); - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) return expr.e.extents(); else if constexpr ( detail::has_tensor_types::value ) @@ -185,7 +197,7 @@ constexpr auto all_extents_equal(tensor_expression const& expr, extents::value ) + if constexpr ( detail::is_compat::value ) if( e != cast_expr.extents() ) return false; @@ -207,11 +219,11 @@ constexpr auto all_extents_equal(binary_tensor_expression const& exp using ::operator==; using ::operator!=; - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) if(e != expr.el.extents()) return false; - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) if(e != expr.er.extents()) return false; @@ -236,7 +248,7 @@ constexpr auto all_extents_equal(unary_tensor_expression const& expr, ex using ::operator==; - if constexpr ( std::is_same::value ) + if constexpr ( detail::is_compat::value ) if(e != expr.e.extents()) return false; diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index 52ddd416e..f9cb8ef39 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -176,11 +176,11 @@ auto transform_span(span const& s, std::size_t const extent) } -template -void transform_spans_impl 
(extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); +template< std::size_t r, std::size_t ... es, std::size_t n, class Span, class ... Spans> +void transform_spans_impl (extents const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); -template -void transform_spans_impl(extents<> const& extents, std::array& span_array, span const& s, Spans&& ... spans) +template< std::size_t r, std::size_t ... es, std::size_t n, class size_type, class Span, class ... Spans> +void transform_spans_impl(extents const& extents, std::array& span_array, span const& s, Spans&& ... spans) { std::get(span_array) = transform_span(s, extents[r]); static constexpr auto nspans = sizeof...(spans); @@ -189,8 +189,8 @@ void transform_spans_impl(extents<> const& extents, std::array& span_ar transform_spans_impl(extents, span_array, std::forward(spans)...); } -template -void transform_spans_impl (extents<> const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) +template< std::size_t r, std::size_t ... es, std::size_t n, class Span, class ... Spans> +void transform_spans_impl (extents const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) { static constexpr auto nspans = sizeof...(Spans); static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); @@ -201,29 +201,6 @@ void transform_spans_impl (extents<> const& extents, std::array& span_ar } -template -void transform_spans_impl (extents const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ); - -template -void transform_spans_impl(extents const& extents, std::array& span_array, span const& s, Spans&& ... 
spans) -{ - std::get(span_array) = transform_span(s, extents[r]); - static constexpr auto nspans = sizeof...(spans); - static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans)...); -} - -template -void transform_spans_impl (extents const& extents, std::array& span_array, std::size_t arg, Spans&& ... spans ) -{ - static constexpr auto nspans = sizeof...(Spans); - static_assert (n==(nspans+r+1),"Static error in boost::numeric::ublas::detail::transform_spans_impl: size mismatch"); - std::get(span_array) = transform_span(Span(arg), extents[r]); - if constexpr (nspans>0) - transform_spans_impl(extents, span_array, std::forward(spans) ... ); -} - /*! @brief Auxiliary function for subtensor that generates array of spans * diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index a7241aca5..94ee98938 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -38,14 +38,16 @@ namespace boost::numeric::ublas { template class tensor_core>> : public detail::tensor_expression< - tensor_dynamic, tensor_dynamic> { + tensor_dynamic, + tensor_core>> + > { public: using tensor_type = tensor_dynamic; using engine_type = subtensor_engine; using self_type = tensor_core; template - using tensor_expression_type = detail::tensor_expression; + using tensor_expression_type = detail::tensor_expression; template using matrix_expression_type = matrix_expression; template @@ -97,10 +99,8 @@ class tensor_core>> explicit tensor_core() = delete; - tensor_core(const tensor_core&) = default; - tensor_core(tensor_type& t) - : tensor_expression_type{} + : tensor_expression_type{} , _spans() , _extents(t.extents()) , _strides(t.strides()) @@ -111,7 +111,7 @@ class 
tensor_core>> template tensor_core(U&& t, FS&& first, SL&&... spans) - : tensor_expression_type{} + : tensor_expression_type{} , _spans(detail::generate_span_vector(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} , _strides{detail::to_span_strides(t.strides(), _spans)} @@ -120,22 +120,23 @@ class tensor_core>> { _extents = detail::to_extents(_spans); _span_strides = ublas::to_strides(_extents,layout_type{}); - for (int i = 0; i < (int) _extents.size(); i++) { - std::cout << _extents[i] << " "; - } - std::cout << std::endl; - for (int i = 0; i < (int) _span_strides.size(); i++) { - std::cout << _span_strides[i] << " "; - } - std::cout << std::endl; - for (int i = 0; i < (int) _strides.size(); i++) { - std::cout << _strides[i] << " "; - } - std::cout << std::endl; } + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + inline tensor_core (const tensor_core &t) + : tensor_expression_type{} + , _spans(t._spans) + , _extents (t._extents ) + , _strides (t._strides ) + , _span_strides(t._span_strides ) + , _data (t._data) + {} + tensor_core(tensor_core&& v) - : tensor_expression_type{} + : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) @@ -277,9 +278,7 @@ class tensor_core>> */ [[nodiscard]] inline reference operator[](size_type i) { - std::cout << "idx:" << i; const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); - std::cout << "->" << idx << std::endl; return _data[idx]; } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index 9a2c2f875..af6be72ab 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -39,7 +39,8 @@ template class tensor_core>>> : public 
detail::tensor_expression< tensor_core>, - tensor_core>> { + tensor_core>>> + > { public: using tensor_type = tensor_core>; using engine_type = subtensor_engine; @@ -97,10 +98,9 @@ class tensor_core> explicit tensor_core() = delete; - tensor_core(const tensor_core&) = default; tensor_core(tensor_type& t) - : tensor_expression_type{} + : tensor_expression_type{} , _spans() , _extents(t.extents()) , _strides(t.strides()) @@ -111,7 +111,7 @@ class tensor_core> template tensor_core(U&& t, FS&& first, SL&&... spans) - : tensor_expression_type{} + : tensor_expression_type{} , _spans(detail::generate_span_array(t.extents(), std::forward(first), std::forward(spans)...)) , _extents{} , _strides{detail::to_span_strides(t.strides(), _spans)} @@ -122,17 +122,27 @@ class tensor_core> _span_strides = ublas::to_strides(_extents,layout_type{}); } - + /** @brief Constructs a tensor_core from another tensor_core + * + * @param t tensor_core to be copied. + */ + inline tensor_core (const tensor_core &t) + : tensor_expression_type{} + , _spans(t._spans) + , _extents (t._extents ) + , _strides (t._strides ) + , _span_strides(t._span_strides ) + , _data (t._data) + {} tensor_core(tensor_core&& v) - : tensor_expression_type{} + : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) , _strides(std::move(v._strides)) , _span_strides(std::move(v._span_strides)) , _data(std::move(v._data)) { - _extents = detail::to_extents(_spans); } /// @brief Default destructor @@ -245,7 +255,7 @@ class tensor_core> */ [[nodiscard]] inline const_reference operator[](size_type i) const { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); return _data[idx]; } @@ -257,9 +267,7 @@ class tensor_core> */ [[nodiscard]] inline reference operator[](size_type i) { - std::cout << "idx:" << i; - const 
auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); - std::cout << "->" << idx << std::endl; + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); return _data[idx]; } @@ -273,7 +281,7 @@ class tensor_core> template [[nodiscard]] inline const_reference at(size_type i) const { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); return _data[idx]; } @@ -285,7 +293,7 @@ class tensor_core> */ [[nodiscard]] inline reference at(size_type i) { - const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin(), _data); + const auto idx = detail::compute_single_index(i, _strides.rbegin(), _strides.rend(), _span_strides.rbegin()); return _data[idx]; } diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index a2aa4531c..54e5116ef 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -32,47 +32,46 @@ explicit unit_test_framework ; test-suite boost-ublas-tensor-test : - [ run # test_access.cpp - # test_algorithms.cpp - # test_einstein_notation.cpp - # test_expression.cpp - # test_expression_evaluation.cpp - # test_extents_dynamic.cpp - # test_extents_dynamic_rank_static.cpp - # test_extents_functions.cpp + [ run test_access.cpp + test_algorithms.cpp + test_einstein_notation.cpp + test_expression.cpp + test_expression_evaluation.cpp + test_extents_dynamic.cpp + test_extents_dynamic_rank_static.cpp + test_extents_functions.cpp test_fixed_rank_expression_evaluation.cpp - # test_fixed_rank_extents.cpp - # test_fixed_rank_functions.cpp - # test_fixed_rank_operators_arithmetic.cpp - # test_fixed_rank_operators_comparison.cpp - # test_fixed_rank_strides.cpp + test_fixed_rank_extents.cpp + test_fixed_rank_functions.cpp + 
test_fixed_rank_operators_arithmetic.cpp + test_fixed_rank_operators_comparison.cpp + test_fixed_rank_strides.cpp test_fixed_rank_tensor.cpp - # test_fixed_rank_tensor_matrix_vector.cpp - # test_functions.cpp - # test_multi_index.cpp - # test_multi_index_utility.cpp - # test_multiplication.cpp - # test_operators_arithmetic.cpp - # test_operators_comparison.cpp - # test_span.cpp - # test_static_expression_evaluation.cpp - # test_static_extents.cpp - # test_static_operators_arithmetic.cpp - # test_static_operators_comparison.cpp - # test_static_strides.cpp - # test_static_tensor.cpp - # test_static_tensor_matrix_vector.cpp - # test_strides.cpp - # test_subtensor.cpp - # test_subtensor_expression_evaluation.cpp - # test_subtensor_extents.cpp - # test_subtensor_matrix_vector.cpp - # test_subtensor_operators_arithmetic.cpp - # test_subtensor_operators_comparison.cpp - # test_subtensor_utility.cpp + test_fixed_rank_tensor_matrix_vector.cpp + test_functions.cpp + test_multi_index.cpp + test_multi_index_utility.cpp + test_multiplication.cpp + test_operators_arithmetic.cpp + test_operators_comparison.cpp + test_span.cpp + test_static_expression_evaluation.cpp + test_static_extents.cpp + test_static_operators_arithmetic.cpp + test_static_operators_comparison.cpp + test_static_strides.cpp + test_static_tensor.cpp + test_static_tensor_matrix_vector.cpp + test_strides.cpp + test_subtensor_expression_evaluation.cpp + # test_subtensor_matrix_vector.cpp + test_subtensor_operators_arithmetic.cpp + test_subtensor_operators_comparison.cpp + test_subtensor_utility.cpp + # test_subtensor.cpp test_tensor.cpp - # test_tensor_matrix_vector.cpp - # unit_test_framework + test_tensor_matrix_vector.cpp + unit_test_framework : : : diff --git a/test/tensor/test_subtensor_expression_evaluation.cpp b/test/tensor/test_subtensor_expression_evaluation.cpp index 4b41830dc..ef084dc14 100644 --- a/test/tensor/test_subtensor_expression_evaluation.cpp +++ 
b/test/tensor/test_subtensor_expression_evaluation.cpp @@ -18,6 +18,7 @@ #include "utility.hpp" +#include #include #include #include @@ -130,7 +131,8 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_retrieve auto s1 = subtensor_type1(t1); auto s2 = subtensor_type2(t2); - + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( t1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( t2 ) ); BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) != ublas::detail::retrieve_extents( s2 ) ); // uexpr1 = s1+1 @@ -154,7 +156,6 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_retrieve BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(s2) ); - // bexpr_bexpr_uexpr = s2 - ((s1+1) + (2+s2)) auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); diff --git a/test/tensor/test_subtensor_operators_arithmetic.cpp b/test/tensor/test_subtensor_operators_arithmetic.cpp index 08ef6b8f9..eea3a9214 100644 --- a/test/tensor/test_subtensor_operators_arithmetic.cpp +++ b/test/tensor/test_subtensor_operators_arithmetic.cpp @@ -55,43 +55,47 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, valu { constexpr auto size = std::tuple_size_v>; using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; auto t = tensor_t (e); auto t2 = tensor_t (e); auto r = tensor_t (e); + auto s = subtensor(t); auto v = value_t {}; + BOOST_CHECK_EQUAL(t.size(), s.size()); + std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - r = t + t + t + t2; + r = s + s + s + t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3*t(i) + t2(i) ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3*s(i) + t2(i) ); - r = t2 / (t+3) * (t+1) - t2; // r = ( t2/ ((t+3)*(t+1)) ) - t2 + r = t2 / 
(s+3) * (s+1) - t2; // r = ( t2/ ((s+3)*(s+1)) ) - t2 - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t2(i) / (t(i)+3)*(t(i)+1) - t2(i) ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), t2(i) / (s(i)+3)*(s(i)+1) - t2(i) ); - r = 3+t2 / (t+3) * (t+1) * t - t2; // r = 3+( t2/ ((t+3)*(t+1)*t) ) - t2 + r = 3+t2 / (s+3) * (s+1) * s - t2; // r = 3+( t2/ ((s+3)*(s+1)*s) ) - t2 - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (t(i)+3)*(t(i)+1)*t(i) - t2(i) ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (s(i)+3)*(s(i)+1)*s(i) - t2(i) ); - r = t2 - t + t2 - t; + r = t2 - s + t2 - s; for(auto i = 0ul; i < r.size(); ++i) BOOST_CHECK_EQUAL ( r(i), 4 ); - r = t * t * t * t2; + r = s * s * s * t2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t(i)*t(i)*t(i)*t2(i) ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), s(i)*s(i)*s(i)*t2(i) ); r = (t2/t2) * (t2/t2); - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r(i), 1 ); }; @@ -111,41 +115,46 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value { constexpr auto size = std::tuple_size_v>; using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + auto t = tensor_t (e); auto t2 = tensor_t (e); auto v = value_t {}; + auto s = subtensor(t); + BOOST_CHECK_EQUAL(t.size(), s.size()); + std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - tensor_t r1 = t + 2 + t + 2; + tensor_t r1 = s + 2 + s + 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r1(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r1(i), 2*s(i) + 4 ); - tensor_t r2 = 2 + t + 2 + t; + tensor_t r2 = 2 + s + 2 + s; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r2(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( 
r2(i), 2*s(i) + 4 ); - tensor_t r3 = (t-2) + (t-2); + tensor_t r3 = (s-2) + (s-2); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r3(i), 2*t(i) - 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r3(i), 2*s(i) - 4 ); - tensor_t r4 = (t*2) * (3*t); + tensor_t r4 = (s*2) * (3*s); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r4(i), 2*3*t(i)*t(i) ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r4(i), 2*3*s(i)*s(i) ); tensor_t r5 = (t2*2) / (2*t2) * t2; - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); tensor_t r6 = (t2/2+1) / (2/t2+1) / t2; - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); }; @@ -168,66 +177,71 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, valu { constexpr auto size = std::tuple_size_v>; using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + auto t = tensor_t (e); auto t2 = tensor_t (e); auto r = tensor_t (e); auto v = value_t {}; + auto s = subtensor(t); + BOOST_CHECK_EQUAL(t.size(), s.size()); + std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - r = t + 2; - r += t; + r = s + 2; + r += s; r += 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) + 4 ); - r = 2 + t; - r += t; + r = 2 + s; + r += s; r += 2; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) + 4 ); - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) + 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) + 4 ); - r = (t-2); - r += t; + r = (s-2); + r += s; r -= 2; - for(auto i = 0ul; i < t.size(); 
++i) - BOOST_CHECK_EQUAL ( r(i), 2*t(i) - 4 ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) - 4 ); - r = (t*2); + r = (s*2); r *= 3; - r *= t; + r *= s; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 2*3*t(i)*t(i) ); + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*3*s(i)*s(i) ); r = (t2*2); r /= 2; r /= t2; r *= t2; - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); r = (t2/2+1); r /= (2/t2+1); r /= t2; - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); tensor_t q = -r; - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( q(i), -r(i) ); tensor_t p = +r; - for(auto i = 0ul; i < t.size(); ++i) + for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( p(i), r(i) ); }; diff --git a/test/tensor/test_subtensor_operators_comparison.cpp b/test/tensor/test_subtensor_operators_comparison.cpp index 1eb3c4396..7b80509f4 100644 --- a/test/tensor/test_subtensor_operators_comparison.cpp +++ b/test/tensor/test_subtensor_operators_comparison.cpp @@ -53,29 +53,33 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fi { using extents_t = std::decay_t; using tensor_t = ublas::tensor_static_rank, layout_t>; + using subtensor = typename tensor_t::subtensor_type; + auto t = tensor_t (e); auto t2 = tensor_t (e); auto v = value_t {}; + auto s = subtensor(t); + std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); + BOOST_CHECK( s == s ); + BOOST_CHECK( s != t2 ); - if(t.empty()) + if(s.empty()) return; - BOOST_CHECK(!(t < t)); - BOOST_CHECK(!(t > t)); - BOOST_CHECK( t < t2 ); - BOOST_CHECK( t2 > t ); - BOOST_CHECK( t <= t ); - BOOST_CHECK( t >= t ); - BOOST_CHECK( t <= t2 ); - BOOST_CHECK( t2 >= t ); + 
BOOST_CHECK(!(s < s)); + BOOST_CHECK(!(s > s)); + BOOST_CHECK( s < t2 ); + BOOST_CHECK( t2 > s ); + BOOST_CHECK( s <= s ); + BOOST_CHECK( s >= s ); + BOOST_CHECK( s <= t2 ); + BOOST_CHECK( t2 >= s ); BOOST_CHECK( t2 >= t2 ); - BOOST_CHECK( t2 >= t ); + BOOST_CHECK( t2 >= s ); }; for_each_in_tuple(extents,check); @@ -93,33 +97,35 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { using extents_t = std::decay_t; using tensor_t = ublas::tensor_static_rank, layout_t>; + using subtensor = typename tensor_t::subtensor_type; auto t = tensor_t (e); auto t2 = tensor_t (e); auto v = value_t {}; + auto s = subtensor(t); std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - BOOST_CHECK( t == t ); - BOOST_CHECK( t != t2 ); + BOOST_CHECK( s == s ); + BOOST_CHECK( s != t2 ); - if(t.empty()) + if(s.empty()) return; - BOOST_CHECK( !(t < t) ); - BOOST_CHECK( !(t > t) ); - BOOST_CHECK( t < (t2+t) ); - BOOST_CHECK( (t2+t) > t ); - BOOST_CHECK( t <= (t+t) ); - BOOST_CHECK( (t+t2) >= t ); - BOOST_CHECK( (t2+t2+2) >= t); - BOOST_CHECK( 2*t2 > t ); - BOOST_CHECK( t < 2*t2 ); - BOOST_CHECK( 2*t2 > t); + BOOST_CHECK( !(s < s) ); + BOOST_CHECK( !(s > s) ); + BOOST_CHECK( s < (t2+s) ); + BOOST_CHECK( (t2+s) > s ); + BOOST_CHECK( s <= (s+s) ); + BOOST_CHECK( (s+t2) >= s ); + BOOST_CHECK( (t2+t2+2) >= s); + BOOST_CHECK( 2*t2 > s ); + BOOST_CHECK( s < 2*t2 ); + BOOST_CHECK( 2*t2 > s); BOOST_CHECK( 2*t2 >= t2 ); BOOST_CHECK( t2 <= 2*t2); - BOOST_CHECK( 3*t2 >= t ); + BOOST_CHECK( 3*t2 >= s ); }); From 0f7538a11d497e3db72a0bf44dc1382deb764340 Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Mon, 23 Aug 2021 21:09:12 +0530 Subject: [PATCH 37/40] Prepare branch for PR --- .../ublas/tensor/tensor/subtensor_dynamic.hpp | 1 + .../tensor/tensor/subtensor_static_rank.hpp | 1 + .../ublas/tensor/tensor/tensor_dynamic.hpp | 4 +- test/tensor/Jamfile | 6 +- test/tensor/test_subtensor.cpp | 742 
++++++++++-------- .../test_subtensor_expression_evaluation.cpp | 248 +++--- test/tensor/test_subtensor_matrix_vector.cpp | 286 ------- .../test_subtensor_operators_arithmetic.cpp | 192 +++-- .../test_subtensor_operators_comparison.cpp | 250 +++--- ...nsor_static_rank_expression_evaluation.cpp | 296 +++++++ ...ensor_static_rank_operators_arithmetic.cpp | 255 ++++++ ...ensor_static_rank_operators_comparison.cpp | 203 +++++ 12 files changed, 1552 insertions(+), 932 deletions(-) delete mode 100644 test/tensor/test_subtensor_matrix_vector.cpp create mode 100644 test/tensor/test_subtensor_static_rank_expression_evaluation.cpp create mode 100644 test/tensor/test_subtensor_static_rank_operators_arithmetic.cpp create mode 100644 test/tensor/test_subtensor_static_rank_operators_comparison.cpp diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index 94ee98938..3dc87fcd9 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -390,6 +390,7 @@ class tensor_core>> [[nodiscard]] inline auto rank () const { return _extents.size(); } [[nodiscard]] inline auto order () const { return this->rank(); } + [[nodiscard]] inline auto const& spans () const noexcept { return _spans; } [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index af6be72ab..c40a2b16e 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -366,6 +366,7 @@ class tensor_core> [[nodiscard]] 
inline auto rank () const { return std::tuple_size_v; } [[nodiscard]] inline auto order () const { return this->rank(); } + [[nodiscard]] inline auto const& spans () const noexcept { return _spans; } [[nodiscard]] inline auto const& strides () const noexcept { return _strides; } [[nodiscard]] inline auto const& span_strides () const noexcept { return _span_strides; } [[nodiscard]] inline auto const& extents () const noexcept { return _extents; } diff --git a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp index 356da1246..a3a36fddc 100644 --- a/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/tensor_dynamic.hpp @@ -241,8 +241,8 @@ template * * @code tensor_core A = b + 3 * b; @endcode * - * @note matrix expression is evaluated and pushed into a temporary matrix before assignment. - * @note extents are automatically extracted from the temporary matrix + * @note vector expression is evaluated and pushed into a temporary vector before assignment. 
+ * @note extents are automatically extracted from the temporary vector * * @param expr vector expression */ diff --git a/test/tensor/Jamfile b/test/tensor/Jamfile index 54e5116ef..a627ac550 100644 --- a/test/tensor/Jamfile +++ b/test/tensor/Jamfile @@ -64,11 +64,13 @@ test-suite boost-ublas-tensor-test test_static_tensor_matrix_vector.cpp test_strides.cpp test_subtensor_expression_evaluation.cpp - # test_subtensor_matrix_vector.cpp test_subtensor_operators_arithmetic.cpp test_subtensor_operators_comparison.cpp + test_subtensor_static_rank_expression_evaluation.cpp + test_subtensor_static_rank_operators_arithmetic.cpp + test_subtensor_static_rank_operators_comparison.cpp test_subtensor_utility.cpp - # test_subtensor.cpp + test_subtensor.cpp test_tensor.cpp test_tensor_matrix_vector.cpp unit_test_framework diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index c4c7b3e09..ab4f0131f 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -27,49 +27,55 @@ using test_types = zip>::with_t; - - fixture_shape() : extents{ - shape{}, // 0 - shape{1,1}, // 1 - shape{1,2}, // 2 - shape{2,1}, // 3 - shape{2,3}, // 4 - shape{2,3,1}, // 5 - shape{4,1,3}, // 6 - shape{1,2,3}, // 7 - shape{4,2,3}, // 8 - shape{4,2,3,5} // 9 - } - {} - std::vector extents; + using extents_type = boost::numeric::ublas::extents<>; + + fixture() : extents { + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5}} // 9 + { + } + std::vector extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixture_shape ) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( subtensor_ctor1_test, value, test_types, fixture ) { namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; 
using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; + using subtensor_type = typename tensor_type::subtensor_type; auto check = [](auto const& e) { auto t = tensor_type(e); auto s = subtensor_type(t); - BOOST_CHECK_EQUAL ( s.size() , t.size() ); + auto ss = subtensor_type(s); + BOOST_CHECK_EQUAL ( s.size() , t.size() ); + BOOST_CHECK_EQUAL ( ss.size() , t.size() ); BOOST_CHECK_EQUAL ( s.rank() , t.rank() ); + BOOST_CHECK_EQUAL ( ss.rank() , t.rank() ); if(ublas::empty(e)) { BOOST_CHECK_EQUAL ( s.empty(), t.empty() ); + BOOST_CHECK_EQUAL ( ss.empty(), t.empty() ); BOOST_CHECK_EQUAL ( s. data(), t. data() ); + BOOST_CHECK_EQUAL ( ss. data(), t. data() ); } else{ BOOST_CHECK_EQUAL ( !s.empty(), !t.empty() ); + BOOST_CHECK_EQUAL ( !ss.empty(), !t.empty() ); BOOST_CHECK_EQUAL ( s. data(), t. data() ); + BOOST_CHECK_EQUAL ( ss. data(), t. data() ); } }; @@ -87,8 +93,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; - using span = ublas::span; + using subtensor_type = typename tensor_type::subtensor_type; + using span = ublas::span<>; { @@ -98,123 +104,411 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) BOOST_CHECK( Asub.strides() == A.strides() ); BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); + + auto Asubsub = subtensor_type( Asub ); + + BOOST_CHECK( Asubsub.strides() == A.strides() ); + BOOST_CHECK( Asubsub.extents() == A.extents() ); + BOOST_CHECK( Asubsub.data() == A.data() ); } { auto A = tensor_type{1,1}; - auto Asub = subtensor_type( A, 0, 0 ); + auto Asub = subtensor_type( A, span(0), span(0) ); BOOST_CHECK( Asub.strides() == A.strides() ); BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); + + auto Asubsub = 
subtensor_type( Asub, span(0), span(0)); + + BOOST_CHECK( Asubsub.strides() == A.strides() ); + BOOST_CHECK( Asubsub.extents() == A.extents() ); + BOOST_CHECK( Asubsub.data() == A.data() ); + } { auto A = tensor_type{1,2}; - auto Asub = subtensor_type( A, 0, span{} ); + auto Asub = subtensor_type( A, span(0), span{} ); BOOST_CHECK( Asub.strides() == A.strides() ); BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); + + auto Asubsub = subtensor_type( Asub, span(0), span()); + + BOOST_CHECK( Asubsub.strides() == A.strides() ); + BOOST_CHECK( Asubsub.extents() == A.extents() ); + BOOST_CHECK( Asubsub.data() == A.data() ); + } + { auto A = tensor_type{1,2}; - auto Asub = subtensor_type( A, 0, 1 ); + auto Asub = subtensor_type( A, span(), span() ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - - BOOST_CHECK_EQUAL( Asub.strides().at(0), 1 ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), 1 ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) ); BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); BOOST_CHECK_EQUAL( Asub.data() , A.data()+ Asub.spans().at(0).first()*A.strides().at(0) + Asub.spans().at(1).first()*A.strides().at(1) ); - } + auto Asubsub = subtensor_type( Asub, span(0), span(1) ); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), A.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data() + + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1) ); + + } { auto A = tensor_type{2,3}; - auto Asub = subtensor_type( A, 0, 1 ); + auto Asub = 
subtensor_type( A, span(), span(0,2,2) ); auto B = tensor_type(Asub.extents()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) * 2 ); - BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); - BOOST_CHECK_EQUAL( Asub.extents().at(1) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( B.strides().at(0), Asub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( B.strides().at(1), Asub.span_strides().at(1) ); BOOST_CHECK_EQUAL( Asub.data() , A.data()+ Asub.spans().at(0).first()*A.strides().at(0) + Asub.spans().at(1).first()*A.strides().at(1) ); + + auto Asubsub = subtensor_type( Asub, span(1), span() ); + auto C = tensor_type(Asubsub.extents()); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), Asub.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), Asub.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( C.strides().at(0), Asubsub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( C.strides().at(1), Asubsub.span_strides().at(1)); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data()+ + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1) ); + } { auto A = tensor_type{4,3}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::max) ); + auto Asub = subtensor_type( A, span(0,3,3), span(0,2,ublas::max) ); auto B = tensor_type(Asub.extents()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), 
A.strides().at(0) * 3 ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) * 2 ); BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); + BOOST_CHECK_EQUAL( B.strides().at(0), Asub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( B.strides().at(1), Asub.span_strides().at(1) ); BOOST_CHECK_EQUAL( Asub.data() , A.data()+ Asub.spans().at(0).first()*A.strides().at(0) + Asub.spans().at(1).first()*A.strides().at(1) ); + + auto Asubsub = subtensor_type( Asub, span(1), span(1,ublas::max) ); + auto C = tensor_type(Asubsub.extents()); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), Asub.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), Asub.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( C.strides().at(0), Asubsub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( C.strides().at(1), Asubsub.span_strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data()+ + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1) ); } { auto A = tensor_type{4,3,5}; - auto Asub = subtensor_type( A, span(1,2), span(1,ub::max), span(2,4) ); + auto Asub = subtensor_type( A, span(1,2), span(1,ublas::max), span(2,4) ); auto B = tensor_type(Asub.extents()); - BOOST_CHECK_EQUAL( Asub.span_strides().at(0), A.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(1), A.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.span_strides().at(2), A.strides().at(2) ); + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(2), A.strides().at(2) ); BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); BOOST_CHECK_EQUAL( 
Asub.extents().at(2) , 3 ); - BOOST_CHECK_EQUAL( Asub.strides().at(0), B.strides().at(0) ); - BOOST_CHECK_EQUAL( Asub.strides().at(1), B.strides().at(1) ); - BOOST_CHECK_EQUAL( Asub.strides().at(2), B.strides().at(2) ); + BOOST_CHECK_EQUAL( B.strides().at(0), Asub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( B.strides().at(1), Asub.span_strides().at(1) ); + BOOST_CHECK_EQUAL( B.strides().at(2), Asub.span_strides().at(2) ); BOOST_CHECK_EQUAL( Asub.data() , A.data()+ Asub.spans().at(0).first()*A.strides().at(0) + Asub.spans().at(1).first()*A.strides().at(1)+ Asub.spans().at(2).first()*A.strides().at(2)); - } -} + auto Asubsub = subtensor_type( Asub, span(1), span(), span(0,2,2) ); + + auto C = tensor_type(Asubsub.extents()); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), Asub.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), Asub.strides().at(1) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(2), Asub.strides().at(2) * 2); + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(2) , 2 ); + BOOST_CHECK_EQUAL( C.strides().at(0), Asubsub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( C.strides().at(1), Asubsub.span_strides().at(1) ); + BOOST_CHECK_EQUAL( C.strides().at(2), Asubsub.span_strides().at(2) ); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data()+ + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1)+ + Asubsub.spans().at(2).first()*Asub.strides().at(2)); + } -BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, fixture_shape ) +} + +BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor3_test, value, test_types ) { + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; using tensor_type = ublas::tensor_dynamic; - using subtensor_type = ublas::subtensor; - // using span = ub::sliced_span; + using subtensor_type 
= typename tensor_type::subtensor_type; + using span = ublas::span<>; + + + { + auto A = tensor_type{}; + auto Asub = subtensor_type( A ); + + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + + auto Asubsub = subtensor_type( Asub ); + + BOOST_CHECK( Asubsub.strides() == A.strides() ); + BOOST_CHECK( Asubsub.extents() == A.extents() ); + BOOST_CHECK( Asubsub.data() == A.data() ); + } + + + + { + auto A = tensor_type{1,1}; + auto Asub = A( span(0), span(0) ); + + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + + auto Asubsub = Asub( span(0), span(0)); + + BOOST_CHECK( Asubsub.strides() == A.strides() ); + BOOST_CHECK( Asubsub.extents() == A.extents() ); + BOOST_CHECK( Asubsub.data() == A.data() ); + + } + + + { + auto A = tensor_type{1,2}; + auto Asub = A( span(0), span{} ); + + BOOST_CHECK( Asub.strides() == A.strides() ); + BOOST_CHECK( Asub.extents() == A.extents() ); + BOOST_CHECK( Asub.data() == A.data() ); + + auto Asubsub = Asub( span(0), span()); + + BOOST_CHECK( Asubsub.strides() == A.strides() ); + BOOST_CHECK( Asubsub.extents() == A.extents() ); + BOOST_CHECK( Asubsub.data() == A.data() ); + + } + + { + auto A = tensor_type{1,2}; + auto Asub = A(span(), span() ); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) ); + + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + + auto Asubsub = Asub( span(0), span(1) ); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), A.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + 
BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data() + + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1) ); + + } + + { + auto A = tensor_type{2,3}; + auto Asub = A(span(), span(0,2,2) ); + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) * 2 ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( B.strides().at(0), Asub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( B.strides().at(1), Asub.span_strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + + auto Asubsub = Asub( span(1), span() ); + auto C = tensor_type(Asubsub.extents()); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), Asub.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), Asub.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( C.strides().at(0), Asubsub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( C.strides().at(1), Asubsub.span_strides().at(1)); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data()+ + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1) ); + + } + + { + auto A = tensor_type{4,3}; + auto Asub = A( span(0,3,3), span(0,2,ublas::max) ); + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) * 3 ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) * 2 ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + + BOOST_CHECK_EQUAL( B.strides().at(0), Asub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( B.strides().at(1), 
Asub.span_strides().at(1) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1) ); + + auto Asubsub = Asub( span(1), span(1,ublas::max) ); + auto C = tensor_type(Asubsub.extents()); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), Asub.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), Asub.strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 1 ); + + BOOST_CHECK_EQUAL( C.strides().at(0), Asubsub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( C.strides().at(1), Asubsub.span_strides().at(1) ); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data()+ + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1) ); + } + + { + auto A = tensor_type{4,3,5}; + auto Asub = A( span(1,2), span(1,ublas::max), span(2,4) ); + + auto B = tensor_type(Asub.extents()); + + BOOST_CHECK_EQUAL( Asub.strides().at(0), A.strides().at(0) ); + BOOST_CHECK_EQUAL( Asub.strides().at(1), A.strides().at(1) ); + BOOST_CHECK_EQUAL( Asub.strides().at(2), A.strides().at(2) ); + + BOOST_CHECK_EQUAL( Asub.extents().at(0) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asub.extents().at(2) , 3 ); + + BOOST_CHECK_EQUAL( B.strides().at(0), Asub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( B.strides().at(1), Asub.span_strides().at(1) ); + BOOST_CHECK_EQUAL( B.strides().at(2), Asub.span_strides().at(2) ); + + BOOST_CHECK_EQUAL( Asub.data() , A.data()+ + Asub.spans().at(0).first()*A.strides().at(0) + + Asub.spans().at(1).first()*A.strides().at(1)+ + Asub.spans().at(2).first()*A.strides().at(2)); + auto Asubsub = Asub( span(1), span(), span(0,2,2) ); + auto C = tensor_type(Asubsub.extents()); + + BOOST_CHECK_EQUAL( Asubsub.strides().at(0), Asub.strides().at(0) ); + BOOST_CHECK_EQUAL( Asubsub.strides().at(1), Asub.strides().at(1) ); + BOOST_CHECK_EQUAL( 
Asubsub.strides().at(2), Asub.strides().at(2) * 2); + + BOOST_CHECK_EQUAL( Asubsub.extents().at(0) , 1 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(1) , 2 ); + BOOST_CHECK_EQUAL( Asubsub.extents().at(2) , 2 ); + + BOOST_CHECK_EQUAL( C.strides().at(0), Asubsub.span_strides().at(0) ); + BOOST_CHECK_EQUAL( C.strides().at(1), Asubsub.span_strides().at(1) ); + BOOST_CHECK_EQUAL( C.strides().at(2), Asubsub.span_strides().at(2) ); + + BOOST_CHECK_EQUAL( Asubsub.data() , Asub.data()+ + Asubsub.spans().at(0).first()*Asub.strides().at(0) + + Asubsub.spans().at(1).first()*Asub.strides().at(1)+ + Asubsub.spans().at(2).first()*Asub.strides().at(2)); + } + +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, fixture ) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = typename tensor_type::subtensor_type; auto check = [](auto const& e) { @@ -227,21 +521,19 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f auto Asub = subtensor_type( A ); auto Bsub = subtensor_type( A ); - + BOOST_CHECK( Asub.span_strides() == A.strides() ); BOOST_CHECK( Asub.strides() == A.strides() ); BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); BOOST_CHECK( Bsub.span_strides() == A.strides() ); BOOST_CHECK( Bsub.strides() == A.strides() ); - BOOST_CHECK( Bsub.getExtents() == A.extents() ); + BOOST_CHECK( Bsub.extents() == A.extents() ); BOOST_CHECK( Bsub.data() == A.data() ); BOOST_CHECK_EQUAL ( Bsub.size() , A.size() ); BOOST_CHECK_EQUAL ( Bsub.rank() , A.rank() ); - - if(ublas::empty(e)) { BOOST_CHECK ( Bsub.empty() ); BOOST_CHECK_EQUAL ( Bsub.data() , nullptr); @@ -262,131 +554,25 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE(subtensor_copy_ctor_test, value, test_types, f } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_copy_ctor_layout, value, 
test_types, fixture_shape ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - using other_layout_type = std::conditional_t::value, ublas::tag::last_order, ublas::tag::first_order>; - using other_tensor_type = ublas::tensor; - - - for(auto const& e : extents) - { - auto r = tensor_type{e}; - other_tensor_type t = r; - tensor_type q = t; - - BOOST_CHECK_EQUAL ( t.size() , r.size() ); - BOOST_CHECK_EQUAL ( t.rank() , r.rank() ); - BOOST_CHECK ( t.extents() == r.extents() ); - - BOOST_CHECK_EQUAL ( q.size() , r.size() ); - BOOST_CHECK_EQUAL ( q.rank() , r.rank() ); - BOOST_CHECK ( q.strides() == r.strides() ); - BOOST_CHECK ( q.extents() == r.extents() ); - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( q[i], r[i] ); - } -} - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_copy_move_ctor, value, test_types, fixture ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - - auto check = [](auto const& e) - { - auto r = tensor_type{e}; - auto t = std::move(r); - BOOST_CHECK_EQUAL ( t.size() , e.product() ); - BOOST_CHECK_EQUAL ( t.rank() , e.size() ); - - if(e.empty()) { - BOOST_CHECK ( t.empty() ); - BOOST_CHECK_EQUAL ( t.data() , nullptr); - } - else{ - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - } - - }; - - for(auto const& e : extents) - check(e); -} - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_ctor_extents_init, value, test_types, fixture ) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - - std::random_device device{}; - std::minstd_rand0 generator(device()); - - using distribution_type = std::conditional_t, std::uniform_int_distribution<>, 
std::uniform_real_distribution<> >; - auto distribution = distribution_type(1,6); - - for(auto const& e : extents){ - auto r = static_cast(distribution(generator)); - auto t = tensor_type{e,r}; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], r ); - } -} - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_ctor_extents_array, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - using array_type = typename tensor_type::array_type; - - for(auto const& e : extents) { - auto a = array_type(e.product()); - auto v = value_type {}; - - for(auto& aa : a){ - aa = v; - v += value_type{1}; - } - auto t = tensor_type{e, a}; - v = value_type{}; - - for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}) - BOOST_CHECK_EQUAL( t[i], v); - } -} - - BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_single_index_access, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = typename tensor_type::subtensor_type; + for(auto const& e : extents) { auto t = tensor_type{e}; auto v = value_type {}; - for(auto i = 0ul; i < t.size(); ++i, v+=value_type{1}){ - t[i] = v; + auto s = subtensor_type(t); + for(auto i = 0ul; i < s.size(); ++i, v+=value_type{1}){ + s[i] = v; BOOST_CHECK_EQUAL( t[i], v ); - t(i) = v; + s(i) = v; BOOST_CHECK_EQUAL( t(i), v ); } } @@ -396,69 +582,70 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_single_index_access, BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_multi_index_access_at, value, test_types, fixture) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; using value_type = 
typename value::first_type; using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - - auto check1 = [](const tensor_type& t) - { - auto v = value_type{}; - for(auto k = 0ul; k < t.size(); ++k){ - BOOST_CHECK_EQUAL(t[k], v); - v+=value_type{1}; - } - }; - - auto check2 = [](const tensor_type& t) - { - std::array k; - auto r = std::is_same_v ? 1 : 0; - auto q = std::is_same_v ? 1 : 0; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); - v+=value_type{1}; - } - } - }; - - auto check3 = [](const tensor_type& t) - { - std::array k; - using op_type = std::conditional_t, std::minus<>, std::plus<>>; - auto r = std::is_same_v ? 2 : 0; - auto o = op_type{}; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ - for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); - v+=value_type{1}; - } - } - } - }; - - auto check4 = [](const tensor_type& t) - { - std::array k; - using op_type = std::conditional_t, std::minus<>, std::plus<>>; - auto r = std::is_same_v ? 3 : 0; - auto o = op_type{}; - auto v = value_type{}; - for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ - for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ - for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ - for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ - BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); - v+=value_type{1}; - } - } - } - } - }; + using tensor_type = ublas::tensor_dynamic; + using subtensor_type = typename tensor_type::subtensor_type; + + auto check1 = [](const auto& t) + { + auto v = value_type{}; + for(auto k = 0ul; k < t.size(); ++k){ + BOOST_CHECK_EQUAL(t[k], v); + v+=value_type{1}; + } + }; + + auto check2 = [](const auto& t) + { + std::array k = {0,0}; + auto r = std::is_same::value ? 
1 : 0; + auto q = std::is_same::value ? 1 : 0; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[q] = 0ul; k[q] < t.size(q); ++k[q]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1]), v); + v+=value_type{1}; + } + } + }; + + auto check3 = [](const auto& t) + { + std::array k = {0,0,0}; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 2 : 0; + auto o = op_type{}; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2]), v); + v+=value_type{1}; + } + } + } + }; + + auto check4 = [](const auto& t) + { + std::array k = {0,0,0,0}; + using op_type = std::conditional_t, std::minus<>, std::plus<>>; + auto r = std::is_same_v ? 3 : 0; + auto o = op_type{}; + auto v = value_type{}; + for(k[r] = 0ul; k[r] < t.size(r); ++k[r]){ + for(k[o(r,1)] = 0ul; k[o(r,1)] < t.size(o(r,1)); ++k[o(r,1)]){ + for(k[o(r,2)] = 0ul; k[o(r,2)] < t.size(o(r,2)); ++k[o(r,2)]){ + for(k[o(r,3)] = 0ul; k[o(r,3)] < t.size(o(r,3)); ++k[o(r,3)]){ + BOOST_CHECK_EQUAL(t.at(k[0],k[1],k[2],k[3]), v); + v+=value_type{1}; + } + } + } + } + }; auto check = [check1,check2,check3,check4](auto const& e) { auto t = tensor_type{e}; @@ -468,10 +655,12 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_multi_index_access_a v+=value_type{1}; } - if(t.rank() == 1) check1(t); - else if(t.rank() == 2) check2(t); - else if(t.rank() == 3) check3(t); - else if(t.rank() == 4) check4(t); + auto s = subtensor_type(t); + + if (t.rank() == 1) check1(s); + else if(t.rank() == 2) check2(s); + else if(t.rank() == 3) check3(s); + else if(t.rank() == 4) check4(s); }; @@ -480,79 +669,4 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_multi_index_access_a } - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_reshape, value, test_types, fixture) -{ - using namespace 
boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - - - for(auto const& efrom : extents){ - for(auto const& eto : extents){ - - auto v = value_type {}; - v+=value_type{1}; - auto t = tensor_type{efrom, v}; - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], v ); - - t.reshape(eto); - for(auto i = 0ul; i < std::min(efrom.product(),eto.product()); ++i) - BOOST_CHECK_EQUAL( t[i], v ); - - BOOST_CHECK_EQUAL ( t.size() , eto.product() ); - BOOST_CHECK_EQUAL ( t.rank() , eto.size() ); - BOOST_CHECK ( t.extents() == eto ); - - if(efrom != eto){ - for(auto i = efrom.product(); i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], value_type{} ); - } - } - } -} - - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_swap, value, test_types, fixture) -{ - using namespace boost::numeric; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - using tensor_type = ublas::tensor; - - for(auto const& e_t : extents){ - for(auto const& e_r : extents) { - - auto v = value_type {} + value_type{1}; - auto w = value_type {} + value_type{2}; - auto t = tensor_type{e_t, v}; - auto r = tensor_type{e_r, w}; - - std::swap( r, t ); - - for(auto i = 0ul; i < t.size(); ++i) - BOOST_CHECK_EQUAL( t[i], w ); - - BOOST_CHECK_EQUAL ( t.size() , e_r.product() ); - BOOST_CHECK_EQUAL ( t.rank() , e_r.size() ); - BOOST_CHECK ( t.extents() == e_r ); - - for(auto i = 0ul; i < r.size(); ++i) - BOOST_CHECK_EQUAL( r[i], v ); - - BOOST_CHECK_EQUAL ( r.size() , e_t.product() ); - BOOST_CHECK_EQUAL ( r.rank() , e_t.size() ); - BOOST_CHECK ( r.extents() == e_t ); - - } - } -} - - - BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_expression_evaluation.cpp b/test/tensor/test_subtensor_expression_evaluation.cpp index ef084dc14..019bedd68 100644 --- a/test/tensor/test_subtensor_expression_evaluation.cpp +++ 
b/test/tensor/test_subtensor_expression_evaluation.cpp @@ -12,41 +12,42 @@ -#include -#include -#include +#include +#include +#include +#include #include "utility.hpp" +#include -#include -#include #include -#include - -BOOST_AUTO_TEST_SUITE(test_subtensor_static_rank_expression) +BOOST_AUTO_TEST_SUITE(test_tensor_expression) using test_types = zip>::with_t; - - - struct fixture { - template - using extents_t = boost::numeric::ublas::extents; - - static constexpr auto extents = - std::make_tuple( -// extents_t<0> {}, - extents_t<2> {1,1}, - extents_t<2> {1,2}, - extents_t<2> {2,1}, - extents_t<2> {2,3}, - extents_t<3> {2,3,1}, - extents_t<3> {4,1,3}, - extents_t<3> {1,2,3}, - extents_t<3> {4,2,3}, - extents_t<4>{4,2,3,5} ); + using extents_t = boost::numeric::ublas::extents<>; + + const std::vector extents = + { +// extents_t{}, // 0 + + extents_t{1,1}, // 1 + extents_t{1,2}, // 2 + extents_t{2,1}, // 3 + + extents_t{2,3}, // 4 + extents_t{2,3,1}, // 5 + extents_t{1,2,3}, // 6 + extents_t{1,1,2,3}, // 7 + extents_t{1,2,3,1,1}, // 8 + + extents_t{4,2,3}, // 9 + extents_t{4,2,1,3}, // 10 + extents_t{4,2,1,3,1}, // 11 + extents_t{1,4,2,1,3,1} // 12 + }; }; @@ -55,19 +56,15 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_retrieve namespace ublas = boost::numeric::ublas; using value_t = typename value::first_type; using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + using subtensor = typename tensor_t::subtensor_type; auto uplus1 = [](auto const& a){return a + value_t(1); }; auto uplus2 = [](auto const& a){return value_t(2) + a; }; auto bplus = std::plus {}; auto bminus = std::minus{}; - for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ - - - - static constexpr auto size = std::tuple_size_v>; - using tensor_t = ublas::tensor_static_rank; - using subtensor = typename tensor_t::subtensor_type; + for(auto const& e : extents) { auto t = tensor_t(e); auto v = value_t{}; @@ -95,94 +92,70 @@ 
BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_retrieve BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); - }); - - for_each_in_tuple(extents, [&](auto I, auto const& e1){ - + } - if ( I >= std::tuple_size_v - 1 ){ - return; - } + for(auto i = 0u; i < extents.size()-1; ++i) + { - constexpr auto size1 = std::tuple_size_v>; - using tensor_type1 = ublas::tensor_static_rank; - using subtensor_type1 = typename tensor_type1::subtensor_type; - - - for_each_in_tuple(extents, [&,I](auto J, auto const& e2){ - - if( J != I + 1 ){ - return; - } - - static constexpr auto size1 = std::tuple_size_v>; - static constexpr auto size2 = std::tuple_size_v>; - using tensor_type2 = ublas::tensor_static_rank; - using subtensor_type2 = typename tensor_type2::subtensor_type; + auto v = value_t{}; - auto v = value_t{}; + tensor_t t1(extents[i]); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - tensor_type1 t1(e1); - for(auto& tt: t1){ tt = v; v+=value_t{1}; } + tensor_t t2(extents[i+1]); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } - tensor_type2 t2(e2); - for(auto& tt: t2){ tt = v; v+=value_t{2}; } + auto s1 = subtensor(t1); + auto s2 = subtensor(t2); - auto s1 = subtensor_type1(t1); - auto s2 = subtensor_type2(t2); + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( t1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( t2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) != ublas::detail::retrieve_extents( s2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( t1 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( t2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) != ublas::detail::retrieve_extents( s2 ) ); + // uexpr1 = s1+1 + // uexpr2 = 2+s2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); + auto uexpr2 = 
ublas::detail::make_unary_tensor_expression( s2, uplus2 ); - // uexpr1 = s1+1 - // uexpr2 = 2+s2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); - BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != ublas::detail::retrieve_extents( uexpr2 ) ); - if constexpr( size1 == size2 ){ - // bexpr_uexpr = (s1+1) + (2+s2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + // bexpr_uexpr = (s1+1) + (2+s2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(s1) ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(s1) ); - // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); + // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(s2) ); + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(s2) ); - // bexpr_bexpr_uexpr = s2 - ((s1+1) + (2+s2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( 
s2, bexpr_uexpr, bminus ); + // bexpr_bexpr_uexpr = s2 - ((s1+1) + (2+s2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); - BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(s2) ); - } + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(s2) ); - }); - }); + } } - BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_all_extents_equal, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; using value_t = typename value::first_type; using layout_t = typename value::second_type; + using tensor_t = ublas::tensor_dynamic; + using subtensor = typename tensor_t::subtensor_type; auto uplus1 = [](auto const& a){return a + value_t(1); }; auto uplus2 = [](auto const& a){return value_t(2) + a; }; auto bplus = std::plus {}; auto bminus = std::minus{}; - for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ - static constexpr auto size = std::tuple_size_v>; - using tensor_t = ublas::tensor_static_rank; - using subtensor = typename tensor_t::subtensor_type; + for(auto const& e : extents) { auto t = tensor_t(e); auto v = value_t{}; @@ -212,85 +185,64 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_all_exte BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); - }); - + }; - for_each_in_tuple(extents, [&](auto I, auto& e1){ - if ( I >= std::tuple_size_v - 1){ - return; - } + for(auto i = 0u; i < extents.size()-1; ++i) + { - static constexpr auto size1 = std::tuple_size_v>; - using tensor_type1 = ublas::tensor_static_rank; - using subtensor_type1 = typename tensor_type1::subtensor_type; - - for_each_in_tuple(extents, [&](auto J, auto& e2){ - - if( J != I + 1 ){ - return; - } - - - static constexpr auto size2 = std::tuple_size_v>; - using tensor_type2 = ublas::tensor_static_rank; - using subtensor_type2 = typename 
tensor_type2::subtensor_type; - - auto v = value_t{}; + auto v = value_t{}; - tensor_type1 t1(e1); - for(auto& tt: t1){ tt = v; v+=value_t{1}; } + tensor_t t1(extents[i]); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } - tensor_type2 t2(e2); - for(auto& tt: t2){ tt = v; v+=value_t{2}; } + tensor_t t2(extents[i+1]); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } - auto s1 = subtensor_type1(t1); - auto s2 = subtensor_type2(t2); + auto s1 = subtensor(t1); + auto s2 = subtensor(t2); - BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); - // uexpr1 = s1+1 - // uexpr2 = 2+s2 - auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); + // uexpr1 = s1+1 + // uexpr2 = 2+s2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); - BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); - if constexpr( size1 == size2 ){ - // bexpr_uexpr = (t1+1) + (2+s2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + // bexpr_uexpr = (s1+1) + (2+s2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); - // bexpr_bexpr_uexpr = ((t1+1) + (2+s2)) - s2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); + // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); - // bexpr_bexpr_uexpr = s2 - ((t1+1) + (2+s2)) - auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); + // bexpr_bexpr_uexpr = s2 - ((s1+1) + (2+s2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = (t1+1) + s2 - auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, s2, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); + // bexpr_uexpr2 = (s1+1) + s2 + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, s2, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); - // bexpr_uexpr2 = ((t1+1) + s2) + t1 - auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); - BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); + // bexpr_uexpr2 = ((s1+1) + s2) + s1 + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, s1, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); - // bexpr_uexpr2 = t1 + (((t1+1) + s2) + t1) - auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); - BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); - } + // bexpr_uexpr2 = s1 + (((s1+1) + s2) + s1) + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); - }); - }); + } } diff --git a/test/tensor/test_subtensor_matrix_vector.cpp b/test/tensor/test_subtensor_matrix_vector.cpp deleted file mode 100644 index 7baf235c3..000000000 --- a/test/tensor/test_subtensor_matrix_vector.cpp +++ /dev/null @@ -1,286 +0,0 @@ -// -// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -// -// The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany -// - - -#include -#include -#include -#include -#include - -#include "utility.hpp" - -BOOST_AUTO_TEST_SUITE ( test_tensor_static_rank_matrix_interoperability ) - -using test_types = zip::with_t; - - - -struct fixture -{ - template - using extents_type = boost::numeric::ublas::extents; - - std::tuple< - extents_type<2>, // 0 - extents_type<2>, // 1 - extents_type<2>, // 2 - extents_type<2>, // 3 - extents_type<2> // 4 - > extents = { - {1,1}, - {1,2}, - {2,1}, - {6,6}, - {9,7}, - }; -}; - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_copy_assignment, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - - for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { - using etype = std::decay_t; - constexpr auto size = std::tuple_size_v; - using tensor = ublas::tensor_static_rank; - using matrix = typename tensor::matrix_type; - - assert(ublas::size(e) == 2); - auto t = tensor{e[1],e[0]}; - auto r = matrix(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r; - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), r(i,j) ); - } - } - }); - - //for_each_in_tuple(extents,check); -} - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_copy_assignment, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename 
value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_type = ublas::tensor_static_rank; - using vector_type = typename tensor_type::vector_type; - - assert(ublas::size(e) == 2); - auto t = tensor_type{e[1],e[0]}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r; - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t[i], r(i) ); - } - }; - - for_each_in_tuple(extents,check); -} - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_expressions, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_type = ublas::tensor_static_rank; - using matrix_type = typename tensor_type::matrix_type; - - assert(ublas::size(e) == 2); - auto t = tensor_type{e[1],e[0]}; - auto r = matrix_type(e[0],e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r + 3*r; - tensor_type s = r + 3*r; - tensor_type q = s + r + 3*r + s; // + 3*r - - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( s.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); - 
BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); - BOOST_CHECK ( !s.empty() ); - BOOST_CHECK_NE ( s.data() , nullptr); - - BOOST_CHECK_EQUAL ( q.extents().at(0) , e.at(0) ); - BOOST_CHECK_EQUAL ( q.extents().at(1) , e.at(1) ); - BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); - BOOST_CHECK ( !q.empty() ); - BOOST_CHECK_NE ( q.data() , nullptr); - - - for(auto j = 0ul; j < t.size(1); ++j){ - for(auto i = 0ul; i < t.size(0); ++i){ - BOOST_CHECK_EQUAL( t.at(i,j), 4*r(i,j) ); - BOOST_CHECK_EQUAL( s.at(i,j), t.at(i,j) ); - BOOST_CHECK_EQUAL( q.at(i,j), 3*s.at(i,j) ); - } - } - }; - - for_each_in_tuple(extents,check); -} - - - - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_vector_expressions, value, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value_type = typename value::first_type; - using layout_type = typename value::second_type; - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_type = ublas::tensor_static_rank; - using vector_type = typename tensor_type::vector_type; - - assert(ublas::size(e) == 2); - auto t = tensor_type{e[1],e[0]}; - auto r = vector_type(e[0]*e[1]); - std::iota(r.data().begin(),r.data().end(), 1); - t = r + 3*r; - tensor_type s = r + 3*r; - tensor_type q = s + r + 3*r + s; // + 3*r - - - BOOST_CHECK_EQUAL ( t.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( t.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( t.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( t.rank() , ublas::size (e) ); - BOOST_CHECK ( !t.empty() ); - BOOST_CHECK_NE ( t.data() , nullptr); - - BOOST_CHECK_EQUAL ( s.extents().at(0) , e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( s.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( s.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( s.rank() , ublas::size (e) ); - BOOST_CHECK ( !s.empty() ); - BOOST_CHECK_NE ( s.data() , nullptr); - - BOOST_CHECK_EQUAL ( q.extents().at(0) , 
e.at(0)*e.at(1) ); - BOOST_CHECK_EQUAL ( q.extents().at(1) , 1); - BOOST_CHECK_EQUAL ( q.size() , ublas::product(e) ); - BOOST_CHECK_EQUAL ( q.rank() , ublas::size (e) ); - BOOST_CHECK ( !q.empty() ); - BOOST_CHECK_NE ( q.data() , nullptr); - - - - for(auto i = 0ul; i < t.size(); ++i){ - BOOST_CHECK_EQUAL( t.at(i), 4*r(i) ); - BOOST_CHECK_EQUAL( s.at(i), t.at(i) ); - BOOST_CHECK_EQUAL( q.at(i), 3*s.at(i) ); - } - }; - - for_each_in_tuple(extents,check); -} - - - -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_matrix_vector_expressions, pair, test_types, fixture ) -{ - namespace ublas = boost::numeric::ublas; - using value = typename pair::first_type; - using layout = typename pair::second_type; - - - auto check = [](auto const& /*unused*/, auto& e) { - constexpr auto size = std::tuple_size_v>; - using tensor = ublas::tensor_static_rank; - using matrix = typename tensor::matrix_type; - using vector = typename tensor::vector_type; - - if(product(e) <= 2) - return; - assert(ublas::size(e) == 2); - auto Q = tensor{e[0],1}; - auto A = matrix(e[0],e[1]); - auto b = vector(e[1]); - auto c = vector(e[0]); - std::iota(b.data().begin(),b.data().end(), 1); - std::fill(A.data().begin(),A.data().end(), 1); - std::fill(c.data().begin(),c.data().end(), 2); - std::fill(Q.begin(),Q.end(), 2); - - tensor T = Q + (ublas::prod(A , b) + 2*c) + 3*Q; - - BOOST_CHECK_EQUAL ( T.extents().at(0) , Q.extents().at(0) ); - BOOST_CHECK_EQUAL ( T.extents().at(1) , Q.extents().at(1)); - BOOST_CHECK_EQUAL ( T.size() , Q.size() ); - BOOST_CHECK_EQUAL ( T.size() , c.size() ); - BOOST_CHECK_EQUAL ( T.rank() , Q.rank() ); - BOOST_CHECK ( !T.empty() ); - BOOST_CHECK_NE ( T.data() , nullptr); - - const auto n = e[1]; - const auto ab = value(std::div(n*(n+1),2).quot); - const auto ref = ab+4*Q(0)+2*c(0); - BOOST_CHECK( std::all_of(T.begin(),T.end(), [ref](auto cc){ return ref == cc; }) ); - -// for(auto i = 0ul; i < T.size(); ++i){ -// auto n = e[1]; -// auto ab = n * (n+1) / 2; -// BOOST_CHECK_EQUAL( 
T(i), ab+4*Q(0)+2*c(0) ); -// } - - }; - for_each_in_tuple(extents,check); -} - - -BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_operators_arithmetic.cpp b/test/tensor/test_subtensor_operators_arithmetic.cpp index eea3a9214..124380d58 100644 --- a/test/tensor/test_subtensor_operators_arithmetic.cpp +++ b/test/tensor/test_subtensor_operators_arithmetic.cpp @@ -1,6 +1,4 @@ -// -// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018-2021 Cem Bassoy // // Distributed under the Boost Software License, Version 1.0. (See // accompanying file LICENSE_1_0.txt or copy at @@ -18,7 +16,7 @@ #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_arithmetic_operations) +BOOST_AUTO_TEST_SUITE(test_subtensor_arithmetic_operations) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; @@ -26,164 +24,187 @@ using test_types = zip::with_t - using extents_t = boost::numeric::ublas::extents; - - std::tuple< - extents_t<2>, // 1 - extents_t<2>, // 2 - extents_t<3>, // 3 - extents_t<3>, // 4 - extents_t<4> // 5 - > extents = { - extents_t<2>{1,1}, - extents_t<2>{2,3}, - extents_t<3>{4,1,3}, - extents_t<3>{4,2,3}, - extents_t<4>{4,2,3,5} + using extents_type = boost::numeric::ublas::extents<>; + + std::vector extents = + { +// extents_type{}, // 0 + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5} // 9 }; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_binary_arithmetic_operations, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_binary_arithmetic_operations, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; - using value_t = typename value::first_type; - using layout_t = typename 
value::second_type; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor = typename tensor_type::subtensor_type; - auto check = [](auto const& /*unused*/, auto& e) + auto check = [](auto const& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_t = ublas::tensor_static_rank; - using subtensor = typename tensor_t::subtensor_type; - auto t = tensor_t (e); - auto t2 = tensor_t (e); - auto r = tensor_t (e); + + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto r = tensor_type (e); auto s = subtensor(t); - auto v = value_t {}; + auto s2 = subtensor(t2); + auto v = value_type {}; BOOST_CHECK_EQUAL(t.size(), s.size()); std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - r = s + s + s + t2; + r = s + s + s + s2; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3*s(i) + t2(i) ); + BOOST_CHECK_EQUAL ( r(i), 3*s(i) + s2(i) ); - r = t2 / (s+3) * (s+1) - t2; // r = ( t2/ ((s+3)*(s+1)) ) - t2 + r = s2 / (s+3) * (s+1) - s2; // r = ( s2/ ((s+3)*(s+1)) ) - s2 for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), t2(i) / (s(i)+3)*(s(i)+1) - t2(i) ); + BOOST_CHECK_EQUAL ( r(i), s2(i) / (s(i)+3)*(s(i)+1) - s2(i) ); - r = 3+t2 / (s+3) * (s+1) * s - t2; // r = 3+( t2/ ((s+3)*(s+1)*s) ) - t2 + r = 3+s2 / (s+3) * (s+1) * s - s2; // r = 3+( s2/ ((s+3)*(s+1)*s) ) - s2 for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), 3+t2(i) / (s(i)+3)*(s(i)+1)*s(i) - t2(i) ); + BOOST_CHECK_EQUAL ( r(i), 3+s2(i) / (s(i)+3)*(s(i)+1)*s(i) - s2(i) ); - r = t2 - s + t2 - s; + r = s2 - s + s2 - s; for(auto i = 0ul; i < r.size(); ++i) BOOST_CHECK_EQUAL ( r(i), 4 ); - r = s * s * s * t2; + r = s * s * s * s2; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), s(i)*s(i)*s(i)*t2(i) ); + BOOST_CHECK_EQUAL ( r(i), s(i)*s(i)*s(i)*s2(i) ); - r = (t2/t2) * (t2/t2); + r = (s2/s2) * (s2/s2); for(auto i = 0ul; i < 
s.size(); ++i) BOOST_CHECK_EQUAL ( r(i), 1 ); }; - for_each_in_tuple(extents,check); + for(auto const& e : extents) + check(e); + + auto t0 = tensor_type(extents.at(0)); + auto t1 = tensor_type(extents.at(1)); + auto t2 = tensor_type(extents.at(2)); + + BOOST_CHECK_NO_THROW ( tensor_type t = subtensor(t0) + t0 ); + BOOST_CHECK_NO_THROW ( tensor_type t = subtensor(t0) + subtensor(t0) ); + BOOST_CHECK_THROW ( tensor_type t = subtensor(t0) + subtensor(t2), std::runtime_error ); + BOOST_CHECK_THROW ( tensor_type t = subtensor(t1) + t2, std::runtime_error ); + } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_unary_arithmetic_operations, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_unary_arithmetic_operations, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; - using value_t = typename value::first_type; - using layout_t = typename value::second_type; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor = typename tensor_type::subtensor_type; - auto check = [](auto const& /*unused*/, auto& e) + auto check = [](auto const& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_t = ublas::tensor_static_rank; - using subtensor = typename tensor_t::subtensor_type; - auto t = tensor_t (e); - auto t2 = tensor_t (e); - auto v = value_t {}; + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; auto s = subtensor(t); + auto s2 = subtensor(t2); BOOST_CHECK_EQUAL(t.size(), s.size()); + BOOST_CHECK_EQUAL(t2.size(), s2.size()); std::iota(t.begin(), t.end(), v); std::iota(t2.begin(), t2.end(), v+2); - tensor_t r1 = s + 2 + s + 2; + tensor_type r1 = s + 2 + s + 2; for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r1(i), 2*s(i) + 4 ); - tensor_t r2 = 2 + s + 2 + s; + tensor_type r2 = 2 + s + 2 + s; for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r2(i), 2*s(i) + 4 ); - 
tensor_t r3 = (s-2) + (s-2); + tensor_type r3 = (s-2) + (s-2); for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r3(i), 2*s(i) - 4 ); - tensor_t r4 = (s*2) * (3*s); + tensor_type r4 = (s*2) * (3*s); for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r4(i), 2*3*s(i)*s(i) ); - tensor_t r5 = (t2*2) / (2*t2) * t2; + tensor_type r5 = (s2*2) / (2*s2) * s2; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r5(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + BOOST_CHECK_EQUAL ( r5(i), (s2(i)*2) / (2*s2(i)) * s2(i) ); - tensor_t r6 = (t2/2+1) / (2/t2+1) / t2; + tensor_type r6 = (s2/2+1) / (2/s2+1) / s2; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r6(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + BOOST_CHECK_EQUAL ( r6(i), (s2(i)/2+1) / (2/s2(i)+1) / s2(i) ); }; - for_each_in_tuple(extents,check); + for(auto const& e : extents) + check(e); + + + auto t0 = tensor_type(extents.at(0)); + auto t2 = tensor_type(extents.at(2)); + + + BOOST_CHECK_NO_THROW ( tensor_type t = subtensor(t0) + 2 + t0 ); + BOOST_CHECK_NO_THROW ( tensor_type t = subtensor(t0) + 2 + subtensor(t0) ); + BOOST_CHECK_THROW ( tensor_type t = subtensor(t0) + 2 + t2, std::runtime_error ); + BOOST_CHECK_THROW ( tensor_type t = subtensor(t0) + 2 + subtensor(t2), std::runtime_error ); + } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_assign_arithmetic_operations, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; - using value_t = typename value::first_type; - using layout_t = typename value::second_type; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor = typename tensor_type::subtensor_type; - auto check = [](auto const& /*unused*/, auto& e) + auto check = [](auto const& e) { - constexpr auto size = std::tuple_size_v>; - using tensor_t = 
ublas::tensor_static_rank; - using subtensor = typename tensor_t::subtensor_type; - - auto t = tensor_t (e); - auto t2 = tensor_t (e); - auto r = tensor_t (e); - auto v = value_t {}; + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto r = tensor_type (e); + auto v = value_type {}; auto s = subtensor(t); + auto s2 = subtensor(t2); BOOST_CHECK_EQUAL(t.size(), s.size()); @@ -221,31 +242,32 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_assign_arithmetic_operations, valu for(auto i = 0ul; i < s.size(); ++i) BOOST_CHECK_EQUAL ( r(i), 2*3*s(i)*s(i) ); - r = (t2*2); + r = (s2*2); r /= 2; - r /= t2; - r *= t2; + r /= s2; + r *= s2; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), (t2(i)*2) / (2*t2(i)) * t2(i) ); + BOOST_CHECK_EQUAL ( r(i), (s2(i)*2) / (2*s2(i)) * s2(i) ); - r = (t2/2+1); - r /= (2/t2+1); - r /= t2; + r = (s2/2+1); + r /= (2/s2+1); + r /= s2; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( r(i), (t2(i)/2+1) / (2/t2(i)+1) / t2(i) ); + BOOST_CHECK_EQUAL ( r(i), (s2(i)/2+1) / (2/s2(i)+1) / s2(i) ); - tensor_t q = -r; + s = -r; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( q(i), -r(i) ); + BOOST_CHECK_EQUAL ( s(i), -r(i) ); - tensor_t p = +r; + s = +r; for(auto i = 0ul; i < s.size(); ++i) - BOOST_CHECK_EQUAL ( p(i), r(i) ); + BOOST_CHECK_EQUAL ( s(i), r(i) ); }; - for_each_in_tuple(extents,check); + for(auto const& e : extents) + check(e); } diff --git a/test/tensor/test_subtensor_operators_comparison.cpp b/test/tensor/test_subtensor_operators_comparison.cpp index 7b80509f4..fa852132f 100644 --- a/test/tensor/test_subtensor_operators_comparison.cpp +++ b/test/tensor/test_subtensor_operators_comparison.cpp @@ -1,63 +1,63 @@ -// -// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com -// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2018 Cem Bassoy // // Distributed under the Boost Software License, Version 1.0. 
(See // accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) // // The authors gratefully acknowledge the support of -// Google and Fraunhofer IOSB, Ettlingen, Germany +// Fraunhofer and Google in producing this work +// which started as a Google Summer of Code project. // +#include +#include #include #include #include #include "utility.hpp" -BOOST_AUTO_TEST_SUITE(test_tensor_static_rank_comparison) + +BOOST_AUTO_TEST_SUITE(test_subtensor_comparison/*, * boost::unit_test::depends_on("test_subtensor")*/) using double_extended = boost::multiprecision::cpp_bin_float_double_extended; using test_types = zip::with_t; struct fixture { - template - using extents_t = boost::numeric::ublas::extents; - - std::tuple< - extents_t<2>, // 1 - extents_t<2>, // 2 - extents_t<3>, // 3 - extents_t<3>, // 4 - extents_t<4> // 5 - > extents = { - extents_t<2>{1,1}, - extents_t<2>{2,3}, - extents_t<3>{4,1,3}, - extents_t<3>{4,2,3}, - extents_t<4>{4,2,3,5} - }; + using extents_type = boost::numeric::ublas::extents<>; + fixture() + : extents{ + // extents_type{}, // 0 + extents_type{1,1}, // 1 + extents_type{1,2}, // 2 + extents_type{2,1}, // 3 + extents_type{2,3}, // 4 + extents_type{2,3,1}, // 5 + extents_type{4,1,3}, // 6 + extents_type{1,2,3}, // 7 + extents_type{4,2,3}, // 8 + extents_type{4,2,3,5}} // 9 + { + } + std::vector extents; }; -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_comparison, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; - using value_t = typename value::first_type; - using layout_t = typename value::second_type; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor = typename tensor_type::subtensor_type; - auto check = [](auto const& /*unused*/, auto& e) + auto check = [](auto const& e) { - using extents_t = 
std::decay_t; - using tensor_t = ublas::tensor_static_rank, layout_t>; - using subtensor = typename tensor_t::subtensor_type; - - auto t = tensor_t (e); - auto t2 = tensor_t (e); - auto v = value_t {}; + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; auto s = subtensor(t); @@ -82,26 +82,45 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison, value, test_types, fi BOOST_CHECK( t2 >= s ); }; - for_each_in_tuple(extents,check); + for(auto const& e : extents) + check(e); + + auto e0 = extents.at(0); + auto e1 = extents.at(1); + auto e2 = extents.at(2); + + tensor_type t0(e0); + tensor_type t1(e1); + tensor_type t2(e2); + + auto b = false; + BOOST_CHECK_NO_THROW ( b = (subtensor(t0) == subtensor(t0))); + BOOST_CHECK_NO_THROW ( b = (subtensor(t1) == subtensor(t2))); + BOOST_CHECK_NO_THROW ( b = (subtensor(t0) == subtensor(t2))); + BOOST_CHECK_NO_THROW ( b = (subtensor(t1) != subtensor(t2))); + + BOOST_CHECK_THROW ( b = (subtensor(t1) >= subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (subtensor(t1) <= subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (subtensor(t1) < subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW ( b = (subtensor(t1) > subtensor(t2)), std::runtime_error ); } -BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions, value, test_types, fixture) +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_comparison_with_tensor_expressions, value, test_types, fixture) { namespace ublas = boost::numeric::ublas; - using value_t = typename value::first_type; - using layout_t = typename value::second_type; - + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor = typename tensor_type::subtensor_type; - for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { - using extents_t = std::decay_t; - using tensor_t = ublas::tensor_static_rank, layout_t>; - 
using subtensor = typename tensor_t::subtensor_type; - auto t = tensor_t (e); - auto t2 = tensor_t (e); - auto v = value_t {}; + auto check = [](auto const& e) + { + auto t = tensor_type (e); + auto t2 = tensor_type (e); + auto v = value_type {}; auto s = subtensor(t); std::iota(t.begin(), t.end(), v); @@ -126,78 +145,119 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_tensor_expressions BOOST_CHECK( 2*t2 >= t2 ); BOOST_CHECK( t2 <= 2*t2); BOOST_CHECK( 3*t2 >= s ); - }); + + }; + + for(auto const& e : extents) + check(e); + + auto e0 = extents.at(0); + auto e1 = extents.at(1); + auto e2 = extents.at(2); + tensor_type t0(e0); + tensor_type t1(e1); + tensor_type t2(e2); + + auto b = false; + BOOST_CHECK_NO_THROW (b = subtensor(t0) == (subtensor(t0) + subtensor(t0)) ); + BOOST_CHECK_NO_THROW (b = subtensor(t1) == (subtensor(t2) + subtensor(t2)) ); + BOOST_CHECK_NO_THROW (b = subtensor(t0) == (subtensor(t2) + 2) ); + BOOST_CHECK_NO_THROW (b = subtensor(t1) != (2 + subtensor(t2)) ); + + BOOST_CHECK_NO_THROW (b = (subtensor(t0) + subtensor(t0)) == subtensor(t0) ); + BOOST_CHECK_NO_THROW (b = (subtensor(t2) + subtensor(t2)) == subtensor(t1) ); + BOOST_CHECK_NO_THROW (b = (subtensor(t2) + 2) == subtensor(t0) ); + BOOST_CHECK_NO_THROW (b = (2 + subtensor(t2)) != subtensor(t1) ); + + BOOST_CHECK_THROW (b = subtensor(t1) >= (subtensor(t2) + subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW (b = subtensor(t1) <= (subtensor(t2) + subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW (b = subtensor(t1) < (subtensor(t2) + subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW (b = subtensor(t1) > (subtensor(t2) + subtensor(t2)), std::runtime_error ); + + BOOST_CHECK_THROW (b = subtensor(t1) >= (subtensor(t2) + 2), std::runtime_error ); + BOOST_CHECK_THROW (b = subtensor(t1) <= (2 + subtensor(t2)), std::runtime_error ); + BOOST_CHECK_THROW (b = subtensor(t1) < (subtensor(t2) + 3), std::runtime_error ); + BOOST_CHECK_THROW (b = subtensor(t1) > (4 + 
subtensor(t2)), std::runtime_error ); + } -//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_tensor_comparison_with_scalar, value, test_types, fixture) -//{ -// namespace ublas = boost::numeric::ublas; -// using value_t = typename value::first_type; -// using layout_t = typename value::second_type; +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_comparison_with_scalar, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_type = typename value::first_type; + using layout_type = typename value::second_type; + using tensor_type = ublas::tensor_dynamic; + using subtensor = typename tensor_type::subtensor_type; + + + auto check = [](auto const& e) + { + tensor_type t2 = tensor_type(e,2); + tensor_type t3 = tensor_type(e,3); -// for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { -// using extents_t = std::decay_t; -// using tensor_t = ublas::tensor_static_rank, layout_t>; + BOOST_CHECK( subtensor(t2) == subtensor(t2) ); + BOOST_CHECK( subtensor(t2) != subtensor(t3) ); -// BOOST_CHECK( tensor_t(e,value_t{2}) == tensor_t(e,value_t{2}) ); -// BOOST_CHECK( tensor_t(e,value_t{2}) != tensor_t(e,value_t{1}) ); + if(ublas::empty(e)) + return; -// if(ublas::empty(e)) -// return; -// BOOST_CHECK( !(tensor_t(e,2) < 2) ); -// BOOST_CHECK( !(tensor_t(e,2) > 2) ); -// BOOST_CHECK( (tensor_t(e,2) >= 2) ); -// BOOST_CHECK( (tensor_t(e,2) <= 2) ); -// BOOST_CHECK( (tensor_t(e,2) == 2) ); -// BOOST_CHECK( (tensor_t(e,2) != 3) ); + BOOST_CHECK( !(subtensor(t2) < 2) ); + BOOST_CHECK( !(subtensor(t2) > 2) ); + BOOST_CHECK( (subtensor(t2) >= 2) ); + BOOST_CHECK( (subtensor(t2) <= 2) ); + BOOST_CHECK( (subtensor(t2) == 2) ); + BOOST_CHECK( (subtensor(t2) != 3) ); -// BOOST_CHECK( !(2 > tensor_t(e,2)) ); -// BOOST_CHECK( !(2 < tensor_t(e,2)) ); -// BOOST_CHECK( (2 <= tensor_t(e,2)) ); -// BOOST_CHECK( (2 >= tensor_t(e,2)) ); -// BOOST_CHECK( (2 == tensor_t(e,2)) ); -// BOOST_CHECK( (3 != tensor_t(e,2)) ); + BOOST_CHECK( !(2 > subtensor(t2)) ); + 
BOOST_CHECK( !(2 < subtensor(t2)) ); + BOOST_CHECK( (2 <= subtensor(t2)) ); + BOOST_CHECK( (2 >= subtensor(t2)) ); + BOOST_CHECK( (2 == subtensor(t2)) ); + BOOST_CHECK( (3 != subtensor(t2)) ); -// BOOST_CHECK( !( tensor_t(e,2)+3 < 5) ); -// BOOST_CHECK( !( tensor_t(e,2)+3 > 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+3 >= 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+3 <= 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+3 == 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+3 != 6) ); + BOOST_CHECK( !( subtensor(t2)+3 < 5) ); + BOOST_CHECK( !( subtensor(t2)+3 > 5) ); + BOOST_CHECK( ( subtensor(t2)+3 >= 5) ); + BOOST_CHECK( ( subtensor(t2)+3 <= 5) ); + BOOST_CHECK( ( subtensor(t2)+3 == 5) ); + BOOST_CHECK( ( subtensor(t2)+3 != 6) ); -// BOOST_CHECK( !( 5 > tensor_t(e,2)+3) ); -// BOOST_CHECK( !( 5 < tensor_t(e,2)+3) ); -// BOOST_CHECK( ( 5 >= tensor_t(e,2)+3) ); -// BOOST_CHECK( ( 5 <= tensor_t(e,2)+3) ); -// BOOST_CHECK( ( 5 == tensor_t(e,2)+3) ); -// BOOST_CHECK( ( 6 != tensor_t(e,2)+3) ); + BOOST_CHECK( !( 5 > subtensor(t2)+3) ); + BOOST_CHECK( !( 5 < subtensor(t2)+3) ); + BOOST_CHECK( ( 5 >= subtensor(t2)+3) ); + BOOST_CHECK( ( 5 <= subtensor(t2)+3) ); + BOOST_CHECK( ( 5 == subtensor(t2)+3) ); + BOOST_CHECK( ( 6 != subtensor(t2)+3) ); -// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) < 5) ); -// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) > 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) >= 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) <= 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) == 5) ); -// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) != 6) ); + BOOST_CHECK( !( subtensor(t2)+subtensor(t3) < 5) ); + BOOST_CHECK( !( subtensor(t2)+subtensor(t3) > 5) ); + BOOST_CHECK( ( subtensor(t2)+subtensor(t3) >= 5) ); + BOOST_CHECK( ( subtensor(t2)+subtensor(t3) <= 5) ); + BOOST_CHECK( ( subtensor(t2)+subtensor(t3) == 5) ); + BOOST_CHECK( ( subtensor(t2)+subtensor(t3) != 6) ); -// BOOST_CHECK( !( 5 > tensor_t(e,2)+tensor_t(e,3)) ); -// BOOST_CHECK( !( 5 < tensor_t(e,2)+tensor_t(e,3)) ); 
-// BOOST_CHECK( ( 5 >= tensor_t(e,2)+tensor_t(e,3)) ); -// BOOST_CHECK( ( 5 <= tensor_t(e,2)+tensor_t(e,3)) ); -// BOOST_CHECK( ( 5 == tensor_t(e,2)+tensor_t(e,3)) ); -// BOOST_CHECK( ( 6 != tensor_t(e,2)+tensor_t(e,3)) ); + BOOST_CHECK( !( 5 > subtensor(t2)+subtensor(t3)) ); + BOOST_CHECK( !( 5 < subtensor(t2)+subtensor(t3)) ); + BOOST_CHECK( ( 5 >= subtensor(t2)+subtensor(t3)) ); + BOOST_CHECK( ( 5 <= subtensor(t2)+subtensor(t3)) ); + BOOST_CHECK( ( 5 == subtensor(t2)+subtensor(t3)) ); + BOOST_CHECK( ( 6 != subtensor(t2)+subtensor(t3)) ); -// }); + }; + + for(auto const& e : extents) + check(e); -//} +} BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_static_rank_expression_evaluation.cpp b/test/tensor/test_subtensor_static_rank_expression_evaluation.cpp new file mode 100644 index 000000000..a533bd248 --- /dev/null +++ b/test/tensor/test_subtensor_static_rank_expression_evaluation.cpp @@ -0,0 +1,296 @@ +// +// Copyright (c) 2020, Amit Singh, amitsingh19975@gmail.com +// Copyright (c) 2021, Cem Bassoy, cem.bassoy@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + + +#include +#include +#include + +#include "utility.hpp" + +#include +#include +#include +#include + +BOOST_AUTO_TEST_SUITE(test_subtensor_static_rank_expression) + +using test_types = zip>::with_t; + + + + +struct fixture +{ + template + using extents_t = boost::numeric::ublas::extents; + + static constexpr auto extents = std::make_tuple( +// extents_t<0> {}, + extents_t<2> {1,1}, + extents_t<2> {1,2}, + extents_t<2> {2,1}, + extents_t<2> {2,3}, + extents_t<3> {2,3,1}, + extents_t<3> {4,1,3}, + extents_t<3> {1,2,3}, + extents_t<3> {4,2,3}, + extents_t<4>{4,2,3,5} ); +}; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_retrieve_extents, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; + + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto const& e){ + + + + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } + auto s = subtensor(t); + + BOOST_CHECK( ublas::detail::retrieve_extents( s ) == e ); + + // uexpr1 = s+1 + // uexpr2 = 2+s + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s, uplus2 ); + + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) == e ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr2 ) == e ); + + // bexpr_uexpr = (s+1) + 
(2+s) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == e ); + + + // bexpr_bexpr_uexpr = ((s+1) + (2+s)) - s + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr ) == e ); + + }); + + for_each_in_tuple(extents, [&](auto I, auto const& e1){ + + + if ( I >= std::tuple_size_v - 1 ){ + return; + } + + constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; + using subtensor_type1 = typename tensor_type1::subtensor_type; + + + for_each_in_tuple(extents, [&,I](auto J, auto const& e2){ + + if( J != I + 1 ){ + return; + } + + static constexpr auto size1 = std::tuple_size_v>; + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; + using subtensor_type2 = typename tensor_type2::subtensor_type; + + auto v = value_t{}; + + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } + + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } + + auto s1 = subtensor_type1(t1); + auto s2 = subtensor_type2(t2); + + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( t1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( t2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) != ublas::detail::retrieve_extents( s2 ) ); + + // uexpr1 = s1+1 + // uexpr2 = 2+s2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); + + BOOST_CHECK( ublas::detail::retrieve_extents( s1 ) == ublas::detail::retrieve_extents( uexpr1 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( s2 ) == ublas::detail::retrieve_extents( uexpr2 ) ); + BOOST_CHECK( ublas::detail::retrieve_extents( uexpr1 ) != 
ublas::detail::retrieve_extents( uexpr2 ) ); + + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (s1+1) + (2+s2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_uexpr ) == ublas::detail::retrieve_extents(s1) ); + + + // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) == ublas::detail::retrieve_extents(s2) ); + + // bexpr_bexpr_uexpr = s2 - ((s1+1) + (2+s2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); + + BOOST_CHECK( ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) == ublas::detail::retrieve_extents(s2) ); + } + + }); + }); +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_expression_all_extents_equal, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto uplus1 = [](auto const& a){return a + value_t(1); }; + auto uplus2 = [](auto const& a){return value_t(2) + a; }; + auto bplus = std::plus {}; + auto bminus = std::minus{}; + + for_each_in_tuple(extents, [&](auto const& /*unused*/, auto& e){ + static constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + + auto t = tensor_t(e); + auto v = value_t{}; + for(auto& tt: t){ tt = v; v+=value_t{1}; } + + auto s = subtensor(t); + + BOOST_CHECK( ublas::detail::all_extents_equal( s , e ) ); + + + // uexpr1 = s+1 + // uexpr2 = 2+s + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s, uplus2 ); + + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, e ) ); + BOOST_CHECK( 
ublas::detail::all_extents_equal( uexpr2, e ) ); + + // bexpr_uexpr = (s+1) + (2+s) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); + + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_uexpr, e ) ); + + + // bexpr_bexpr_uexpr = ((s+1) + (2+s)) - s + auto bexpr_bexpr_uexpr = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s, bminus ); + + BOOST_CHECK( ublas::detail::all_extents_equal( bexpr_bexpr_uexpr , e ) ); + + }); + + + for_each_in_tuple(extents, [&](auto I, auto& e1){ + + if ( I >= std::tuple_size_v - 1){ + return; + } + + static constexpr auto size1 = std::tuple_size_v>; + using tensor_type1 = ublas::tensor_static_rank; + using subtensor_type1 = typename tensor_type1::subtensor_type; + + for_each_in_tuple(extents, [&](auto J, auto& e2){ + + if( J != I + 1 ){ + return; + } + + + static constexpr auto size2 = std::tuple_size_v>; + using tensor_type2 = ublas::tensor_static_rank; + using subtensor_type2 = typename tensor_type2::subtensor_type; + + auto v = value_t{}; + + tensor_type1 t1(e1); + for(auto& tt: t1){ tt = v; v+=value_t{1}; } + + tensor_type2 t2(e2); + for(auto& tt: t2){ tt = v; v+=value_t{2}; } + + auto s1 = subtensor_type1(t1); + auto s2 = subtensor_type2(t2); + + BOOST_CHECK( ublas::detail::all_extents_equal( t1, ublas::detail::retrieve_extents(t1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( t2, ublas::detail::retrieve_extents(t2) ) ); + + // uexpr1 = s1+1 + // uexpr2 = 2+s2 + auto uexpr1 = ublas::detail::make_unary_tensor_expression( s1, uplus1 ); + auto uexpr2 = ublas::detail::make_unary_tensor_expression( s2, uplus2 ); + + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr1, ublas::detail::retrieve_extents(uexpr1) ) ); + BOOST_CHECK( ublas::detail::all_extents_equal( uexpr2, ublas::detail::retrieve_extents(uexpr2) ) ); + + if constexpr( size1 == size2 ){ + // bexpr_uexpr = (t1+1) + (2+s2) + auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus 
); + + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr, ublas::detail::retrieve_extents( bexpr_uexpr ) ) ); + + // bexpr_bexpr_uexpr = ((t1+1) + (2+s2)) - s2 + auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, s2, bminus ); + + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr1, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr1 ) ) ); + + // bexpr_bexpr_uexpr = s2 - ((t1+1) + (2+s2)) + auto bexpr_bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( s2, bexpr_uexpr, bminus ); + + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr2 ) ) ); + + + // bexpr_uexpr2 = (t1+1) + s2 + auto bexpr_uexpr2 = ublas::detail::make_binary_tensor_expression( uexpr1, s2, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_uexpr2, ublas::detail::retrieve_extents( bexpr_uexpr2 ) ) ); + + + // bexpr_uexpr2 = ((t1+1) + s2) + t1 + auto bexpr_bexpr_uexpr3 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr2, t1, bplus ); + BOOST_CHECK( ! ublas::detail::all_extents_equal( bexpr_bexpr_uexpr3, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr3 ) ) ); + + // bexpr_uexpr2 = t1 + (((t1+1) + s2) + t1) + auto bexpr_bexpr_uexpr4 = ublas::detail::make_binary_tensor_expression( t1, bexpr_bexpr_uexpr3, bplus ); + BOOST_CHECK( ! 
ublas::detail::all_extents_equal( bexpr_bexpr_uexpr4, ublas::detail::retrieve_extents( bexpr_bexpr_uexpr4 ) ) ); + } + + }); + }); + +} + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_static_rank_operators_arithmetic.cpp b/test/tensor/test_subtensor_static_rank_operators_arithmetic.cpp new file mode 100644 index 000000000..4f49c70e5 --- /dev/null +++ b/test/tensor/test_subtensor_static_rank_operators_arithmetic.cpp @@ -0,0 +1,255 @@ +// +// Copyright (c) 2018-2020, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019-2020, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + + +#include + +#include +#include +#include "utility.hpp" + +BOOST_AUTO_TEST_SUITE(test_subtensor_static_rank_arithmetic_operations) + +using double_extended = boost::multiprecision::cpp_bin_float_double_extended; + +using test_types = zip::with_t; + +struct fixture +{ + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 + > extents = { + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; +}; + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_binary_arithmetic_operations, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t 
(e); + auto s = subtensor(subtensor(t)); + auto s2 = subtensor(t2); + auto v = value_t {}; + + BOOST_CHECK_EQUAL(t.size(), s.size()); + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + r = s + s + s + s2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3*s(i) + s2(i) ); + + + r = s2 / (s+3) * (s+1) - s2; // r = ( s2/ ((s+3)*(s+1)) ) - s2 + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), s2(i) / (s(i)+3)*(s(i)+1) - s2(i) ); + + r = 3+s2 / (s+3) * (s+1) * s - s2; // r = 3+( s2/ ((s+3)*(s+1)*s) ) - s2 + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 3+s2(i) / (s(i)+3)*(s(i)+1)*s(i) - s2(i) ); + + r = s2 - s + s2 - s; + + for(auto i = 0ul; i < r.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 4 ); + + + r = s * s * s * s2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), s(i)*s(i)*s(i)*s2(i) ); + + r = (s2/s2) * (s2/s2); + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 1 ); + }; + + for_each_in_tuple(extents,check); +} + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_unary_arithmetic_operations, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + auto s = subtensor(t); + auto s2 = subtensor(t2); + BOOST_CHECK_EQUAL(t.size(), s.size()); + + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + tensor_t r1 = s + 2 + s + 2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r1(i), 2*s(i) + 4 ); + + tensor_t r2 = 2 + s + 2 + s; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r2(i), 2*s(i) + 4 
); + + tensor_t r3 = (s-2) + (s-2); + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r3(i), 2*s(i) - 4 ); + + tensor_t r4 = (s*2) * (3*s); + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r4(i), 2*3*s(i)*s(i) ); + + tensor_t r5 = (s2*2) / (2*s2) * s2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r5(i), (s2(i)*2) / (2*s2(i)) * s2(i) ); + + tensor_t r6 = (s2/2+1) / (2/s2+1) / s2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r6(i), (s2(i)/2+1) / (2/s2(i)+1) / s2(i) ); + + }; + + for_each_in_tuple(extents,check); +} + + + + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_assign_arithmetic_operations, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + auto check = [](auto const& /*unused*/, auto& e) + { + constexpr auto size = std::tuple_size_v>; + using tensor_t = ublas::tensor_static_rank; + using subtensor = typename tensor_t::subtensor_type; + + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto r = tensor_t (e); + auto v = value_t {}; + auto s = subtensor(t); + auto s2 = subtensor(t2); + BOOST_CHECK_EQUAL(t.size(), s.size()); + + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + r = s + 2; + r += s; + r += 2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) + 4 ); + + r = 2 + s; + r += s; + r += 2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) + 4 ); + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) + 4 ); + + r = (s-2); + r += s; + r -= 2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*s(i) - 4 ); + + r = (s*2); + r *= 3; + r *= s; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), 2*3*s(i)*s(i) ); + + r = (s2*2); + r /= 2; + r /= s2; + r *= s2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( 
r(i), (s2(i)*2) / (2*s2(i)) * s2(i) ); + + r = (s2/2+1); + r /= (2/s2+1); + r /= s2; + + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( r(i), (s2(i)/2+1) / (2/s2(i)+1) / s2(i) ); + + tensor_t q = -r; + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( q(i), -r(i) ); + + tensor_t p = +r; + for(auto i = 0ul; i < s.size(); ++i) + BOOST_CHECK_EQUAL ( p(i), r(i) ); + }; + + for_each_in_tuple(extents,check); +} + + +BOOST_AUTO_TEST_SUITE_END() diff --git a/test/tensor/test_subtensor_static_rank_operators_comparison.cpp b/test/tensor/test_subtensor_static_rank_operators_comparison.cpp new file mode 100644 index 000000000..4e5c4f2c8 --- /dev/null +++ b/test/tensor/test_subtensor_static_rank_operators_comparison.cpp @@ -0,0 +1,203 @@ +// +// Copyright (c) 2018, Cem Bassoy, cem.bassoy@gmail.com +// Copyright (c) 2019, Amit Singh, amitsingh19975@gmail.com +// +// Distributed under the Boost Software License, Version 1.0. (See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// The authors gratefully acknowledge the support of +// Google and Fraunhofer IOSB, Ettlingen, Germany +// + + + +#include +#include +#include +#include "utility.hpp" + +BOOST_AUTO_TEST_SUITE(test_subtensor_static_rank_comparison) + +using double_extended = boost::multiprecision::cpp_bin_float_double_extended; + +using test_types = zip::with_t; + +struct fixture { + template + using extents_t = boost::numeric::ublas::extents; + + std::tuple< + extents_t<2>, // 1 + extents_t<2>, // 2 + extents_t<3>, // 3 + extents_t<3>, // 4 + extents_t<4> // 5 + > extents = { + extents_t<2>{1,1}, + extents_t<2>{2,3}, + extents_t<3>{4,1,3}, + extents_t<3>{4,2,3}, + extents_t<4>{4,2,3,5} + }; +}; + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_comparison, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + auto check = 
[](auto const& /*unused*/, auto& e) + { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + using subtensor = typename tensor_t::subtensor_type; + + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + auto s = subtensor(t); + + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( s == s ); + BOOST_CHECK( s != t2 ); + + if(s.empty()) + return; + + BOOST_CHECK(!(s < s)); + BOOST_CHECK(!(s > s)); + BOOST_CHECK( s < t2 ); + BOOST_CHECK( t2 > s ); + BOOST_CHECK( s <= s ); + BOOST_CHECK( s >= s ); + BOOST_CHECK( s <= t2 ); + BOOST_CHECK( t2 >= s ); + BOOST_CHECK( t2 >= t2 ); + BOOST_CHECK( t2 >= s ); + }; + + for_each_in_tuple(extents,check); + +} + + +BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_comparison_with_tensor_expressions, value, test_types, fixture) +{ + namespace ublas = boost::numeric::ublas; + using value_t = typename value::first_type; + using layout_t = typename value::second_type; + + + for_each_in_tuple(extents,[](auto const& /*unused*/, auto& e) { + using extents_t = std::decay_t; + using tensor_t = ublas::tensor_static_rank, layout_t>; + using subtensor = typename tensor_t::subtensor_type; + + auto t = tensor_t (e); + auto t2 = tensor_t (e); + auto v = value_t {}; + auto s = subtensor(t); + + std::iota(t.begin(), t.end(), v); + std::iota(t2.begin(), t2.end(), v+2); + + BOOST_CHECK( s == s ); + BOOST_CHECK( s != t2 ); + + if(s.empty()) + return; + + BOOST_CHECK( !(s < s) ); + BOOST_CHECK( !(s > s) ); + BOOST_CHECK( s < (t2+s) ); + BOOST_CHECK( (t2+s) > s ); + BOOST_CHECK( s <= (s+s) ); + BOOST_CHECK( (s+t2) >= s ); + BOOST_CHECK( (t2+t2+2) >= s); + BOOST_CHECK( 2*t2 > s ); + BOOST_CHECK( s < 2*t2 ); + BOOST_CHECK( 2*t2 > s); + BOOST_CHECK( 2*t2 >= t2 ); + BOOST_CHECK( t2 <= 2*t2); + BOOST_CHECK( 3*t2 >= s ); + }); + + +} + + + +//BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_static_rank_comparison_with_scalar, value, test_types, 
fixture) +//{ +// namespace ublas = boost::numeric::ublas; +// using value_t = typename value::first_type; +// using layout_t = typename value::second_type; + + +// for_each_in_tuple(extents, [](auto const& /*unused*/, auto& e) { +// using extents_t = std::decay_t; +// using tensor_t = ublas::tensor_static_rank, layout_t>; + +// BOOST_CHECK( tensor_t(e,value_t{2}) == tensor_t(e,value_t{2}) ); +// BOOST_CHECK( tensor_t(e,value_t{2}) != tensor_t(e,value_t{1}) ); + +// if(ublas::empty(e)) +// return; + +// BOOST_CHECK( !(tensor_t(e,2) < 2) ); +// BOOST_CHECK( !(tensor_t(e,2) > 2) ); +// BOOST_CHECK( (tensor_t(e,2) >= 2) ); +// BOOST_CHECK( (tensor_t(e,2) <= 2) ); +// BOOST_CHECK( (tensor_t(e,2) == 2) ); +// BOOST_CHECK( (tensor_t(e,2) != 3) ); + +// BOOST_CHECK( !(2 > tensor_t(e,2)) ); +// BOOST_CHECK( !(2 < tensor_t(e,2)) ); +// BOOST_CHECK( (2 <= tensor_t(e,2)) ); +// BOOST_CHECK( (2 >= tensor_t(e,2)) ); +// BOOST_CHECK( (2 == tensor_t(e,2)) ); +// BOOST_CHECK( (3 != tensor_t(e,2)) ); + +// BOOST_CHECK( !( tensor_t(e,2)+3 < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+3 > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+3 != 6) ); + + +// BOOST_CHECK( !( 5 > tensor_t(e,2)+3) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 >= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+3) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+3) ); + + +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) < 5) ); +// BOOST_CHECK( !( tensor_t(e,2)+tensor_t(e,3) > 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) >= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) <= 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) == 5) ); +// BOOST_CHECK( ( tensor_t(e,2)+tensor_t(e,3) != 6) ); + + +// BOOST_CHECK( !( 5 > tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( !( 5 < tensor_t(e,2)+tensor_t(e,3)) ); +// 
BOOST_CHECK( ( 5 >= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 <= tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 5 == tensor_t(e,2)+tensor_t(e,3)) ); +// BOOST_CHECK( ( 6 != tensor_t(e,2)+tensor_t(e,3)) ); + +// }); + +//} + + +BOOST_AUTO_TEST_SUITE_END() From 6232c50f7b1a65ed12ec5b7bd00d3be8ef8702fb Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Mon, 23 Aug 2021 21:24:24 +0530 Subject: [PATCH 38/40] Fix failing tests --- examples/tensor/access_subtensor.cpp | 90 --------------------- include/boost/numeric/ublas/tensor/span.hpp | 15 ++-- test/tensor/test_span.cpp | 17 ++-- 3 files changed, 16 insertions(+), 106 deletions(-) diff --git a/examples/tensor/access_subtensor.cpp b/examples/tensor/access_subtensor.cpp index 8f1861678..0f879824b 100644 --- a/examples/tensor/access_subtensor.cpp +++ b/examples/tensor/access_subtensor.cpp @@ -49,31 +49,6 @@ int main() auto A = t1 (span(1,1,2), span(0,2,2), span()); auto B = subtensor(A); - std::cout << "% --------------------------- " << std::endl; - auto uexpr1 = ublas::detail::make_unary_tensor_expression( B, uplus1 ); - auto uexpr2 = ublas::detail::make_unary_tensor_expression( A, uplus1 ); - for (auto& x: uexpr1.e.extents()) { - std::cout << x << " "; - } - std::cout << std::endl; - for (auto& x: uexpr2.e.extents()) { - std::cout << x << " "; - } - - std::cout << std::endl; - std::cout << "% --------------------------- " << std::endl; - - // bexpr_uexpr = (s1+1) + (2+s2) - auto bexpr_uexpr = ublas::detail::make_binary_tensor_expression( uexpr1, uexpr2, bplus ); - - // bexpr_bexpr_uexpr = ((s1+1) + (2+s2)) - s2 - auto bexpr_bexpr_uexpr1 = ublas::detail::make_binary_tensor_expression( bexpr_uexpr, B, bminus ); - - auto ext = ublas::detail::retrieve_extents(bexpr_bexpr_uexpr1); - for (auto& x: ext) { - std::cout << x << " "; - } - std::cout << std::endl; tensor t2 = ones(2,2,2) + A + B; auto t3 = ublas::inner_prod(B, t2); @@ -87,69 +62,4 @@ int main() std::cerr << "Cought exception " << e.what(); std::cerr 
<< " in the main function of access-tensor." << std::endl; } - - // try { - // using value = std::complex; - // using layout = ublas::layout::last_order; // storage format - // using tensor = ublas::tensor_dynamic; - // using shape = typename tensor::extents_type; - // using span = ublas::span<>; - // constexpr auto zeros = ublas::zeros{}; - - - // // creates a four-dimensional tensor with extents 5,4,3 and 2 - // // tensor A stores complex floating-point extended double precision numbers - // // according to the last-order storage format - // // and initializes it with the default value. - - // //NOLINTNEXTLINE - // tensor t1 = zeros(5,4,3,2); - // auto B = t1(span(), span(), span(), span()); - - // // initializes the tensor with increasing values along the last-index - // // using a single-index - // auto vc = value(0,0); - // for(auto i = 0u; i < B.size(); ++i, vc += value(1,1)) - // B[i] = vc; - - // // formatted output - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "B=" << B << ";" << std::endl << std::endl; - - // auto C = tensor(B.extents()); - // // computes the complex conjugate of elements of B - // // using multi-index notation. - // for(auto i = 0u; i < B.size(0); ++i) - // for(auto j = 0u; j < B.size(1); ++j) - // for(auto k = 0u; k < B.size(2); ++k) - // for(auto l = 0u; l < B.size(3); ++l) - // C.at(i,j,k,l) = std::conj(B.at(i,j,k,l)); - - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "C=" << C << ";" << std::endl << std::endl; - - - // // // computes the complex conjugate of elements of B - // // // using iterators. 
- // auto D = tensor(B.extents()); - // // // std::transform(B.begin(), B.end(), D.begin(), [](auto const& b){ return std::conj(b); }); - // // std::cout << "% --------------------------- " << std::endl; - // // std::cout << "% --------------------------- " << std::endl << std::endl; - // // std::cout << "D=" << D << ";" << std::endl << std::endl; - - // // reshaping tensors. - // auto new_extents = B.extents().base(); - // std::next_permutation( new_extents.begin(), new_extents.end() ); - // auto E = reshape( D, shape(new_extents) ); - // std::cout << "% --------------------------- " << std::endl; - // std::cout << "% --------------------------- " << std::endl << std::endl; - // std::cout << "E=" << E << ";" << std::endl << std::endl; - - - // } catch (const std::exception& e) { - // std::cerr << "Cought exception " << e.what(); - // std::cerr << "in the main function of access-tensor." << std::endl; - // } } diff --git a/include/boost/numeric/ublas/tensor/span.hpp b/include/boost/numeric/ublas/tensor/span.hpp index 81165c4eb..70ac6c9b1 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -75,11 +75,16 @@ class span , step_ (s) , last_ (l) { - if(s == 0 && f != l) - throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); - if(f > l) - throw std::runtime_error("Error in span::span: last_ is smaller than first"); - last_ = l - ((l-f)%s); + if(f == l){ + last_ = l; + step_ = value_type(1); + } else { + if(s == 0 && f != l) + throw std::runtime_error("Error in span::span : cannot have a step_ equal to zero."); + if(f > l) + throw std::runtime_error("Error in span::span: last_ is smaller than first"); + last_ = l - ((l-f)%s); + } } span(span const& other) diff --git a/test/tensor/test_span.cpp b/test/tensor/test_span.cpp index 4a66cb030..6a85444a5 100644 --- a/test/tensor/test_span.cpp +++ b/test/tensor/test_span.cpp @@ -50,7 +50,7 @@ BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) 
BOOST_CHECK_EQUAL (spans[1].first(),0); BOOST_CHECK_EQUAL (spans[1].step (),1); BOOST_CHECK_EQUAL (spans[1].last (),4); - BOOST_CHECK_EQUAL (spans[1].size (),4); + BOOST_CHECK_EQUAL (spans[1].size (),5); BOOST_CHECK_EQUAL (spans[2].first(),2); BOOST_CHECK_EQUAL (spans[2].step (),1); @@ -58,7 +58,7 @@ BOOST_FIXTURE_TEST_CASE( ctor_test, fixture ) BOOST_CHECK_EQUAL (spans[2].size (),5); BOOST_CHECK_EQUAL (spans[3].first(),0); - BOOST_CHECK_EQUAL (spans[3].step (),0); + BOOST_CHECK_EQUAL (spans[3].step (),1); BOOST_CHECK_EQUAL (spans[3].last (),0); BOOST_CHECK_EQUAL (spans[3].size (),1); @@ -117,7 +117,7 @@ BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) BOOST_CHECK_EQUAL (span_type(spans[1]).first(),0); BOOST_CHECK_EQUAL (span_type(spans[1]).step (),1); BOOST_CHECK_EQUAL (span_type(spans[1]).last (),4); - BOOST_CHECK_EQUAL (span_type(spans[1]).size (),4); + BOOST_CHECK_EQUAL (span_type(spans[1]).size (),5); BOOST_CHECK_EQUAL (span_type(spans[2]).first(),2); BOOST_CHECK_EQUAL (span_type(spans[2]).step (),1); @@ -125,7 +125,7 @@ BOOST_FIXTURE_TEST_CASE( copy_ctor_test, fixture ) BOOST_CHECK_EQUAL (span_type(spans[2]).size (),5); BOOST_CHECK_EQUAL (span_type(spans[3]).first(),0); - BOOST_CHECK_EQUAL (span_type(spans[3]).step (),0); + BOOST_CHECK_EQUAL (span_type(spans[3]).step (),1); BOOST_CHECK_EQUAL (span_type(spans[3]).last (),0); BOOST_CHECK_EQUAL (span_type(spans[3]).size (),1); @@ -178,7 +178,7 @@ BOOST_FIXTURE_TEST_CASE( assignment_operator_test, fixture ) BOOST_CHECK_EQUAL ((c1=spans[1]).first(),0); BOOST_CHECK_EQUAL ((c1=spans[1]).step (),1); BOOST_CHECK_EQUAL ((c1=spans[1]).last (),4); - BOOST_CHECK_EQUAL ((c1=spans[1]).size (),4); + BOOST_CHECK_EQUAL ((c1=spans[1]).size (),5); auto c2 = spans[3]; BOOST_CHECK_EQUAL ((c2=spans[2]).first(),2); @@ -188,7 +188,7 @@ BOOST_FIXTURE_TEST_CASE( assignment_operator_test, fixture ) auto c3 = spans[4]; BOOST_CHECK_EQUAL ((c3=spans[3]).first(),0); - BOOST_CHECK_EQUAL ((c3=spans[3]).step (),0); + BOOST_CHECK_EQUAL 
((c3=spans[3]).step (),1); BOOST_CHECK_EQUAL ((c3=spans[3]).last (),0); BOOST_CHECK_EQUAL ((c3=spans[3]).size (),1); @@ -230,10 +230,5 @@ BOOST_FIXTURE_TEST_CASE( assignment_operator_test, fixture ) } -BOOST_FIXTURE_TEST_CASE( function_operator_test, fixture ) -{ - -} - BOOST_AUTO_TEST_SUITE_END(); From df2c8642826ddb8231a995e31bb08d3684f3b37a Mon Sep 17 00:00:00 2001 From: Kannav Mehta Date: Mon, 30 Aug 2021 12:14:38 +0530 Subject: [PATCH 39/40] fix failing clang-tidy checks --- examples/tensor/instantiate_subtensor.cpp | 1 - include/boost/numeric/ublas/tensor/access.hpp | 2 +- include/boost/numeric/ublas/tensor/span.hpp | 10 +++++----- .../boost/numeric/ublas/tensor/subtensor_utility.hpp | 4 ++-- .../numeric/ublas/tensor/tensor/subtensor_dynamic.hpp | 2 +- .../ublas/tensor/tensor/subtensor_static_rank.hpp | 2 +- test/tensor/test_subtensor.cpp | 8 ++++---- test/tensor/test_subtensor_utility.cpp | 2 +- 8 files changed, 15 insertions(+), 16 deletions(-) diff --git a/examples/tensor/instantiate_subtensor.cpp b/examples/tensor/instantiate_subtensor.cpp index a146dcce1..85595b38b 100644 --- a/examples/tensor/instantiate_subtensor.cpp +++ b/examples/tensor/instantiate_subtensor.cpp @@ -1,6 +1,5 @@ #include -using namespace boost::numeric::ublas; void instantiate_subtensor_dynamic() { diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index 525282fd9..5980aea04 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -53,7 +53,7 @@ constexpr inline auto compute_single_index(InputIt1 i, InputIt1 ip, InputIt2 w) template constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 w) { - if constexpr(p==0u) return 0ul; + if constexpr(p==0u) return 0ul; else if constexpr(p >1u) return compute_single_index(i,i,w)+i[p-1]*w[p-1]; else return i[p-1]*w[p-1]; } diff --git a/include/boost/numeric/ublas/tensor/span.hpp 
b/include/boost/numeric/ublas/tensor/span.hpp index 70ac6c9b1..7bec7a391 100644 --- a/include/boost/numeric/ublas/tensor/span.hpp +++ b/include/boost/numeric/ublas/tensor/span.hpp @@ -102,9 +102,9 @@ class span return *this; } - inline auto first() const {return first_; } - inline auto last () const {return last_ ; } - inline auto step () const {return step_ ; } + [[ nodiscard ]] inline auto first() const {return first_; } + [[ nodiscard ]] inline auto last () const {return last_ ; } + [[ nodiscard ]] inline auto step () const {return step_ ; } ~span() = default; @@ -115,12 +115,12 @@ class span return (last_-first_) / step_ + value_type(1); } -protected: +private: value_type first_, step_, last_ ; }; -} // namespace +} // namespace boost::numeric::ublas template diff --git a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp index f9cb8ef39..d60ca15a9 100644 --- a/include/boost/numeric/ublas/tensor/subtensor_utility.hpp +++ b/include/boost/numeric/ublas/tensor/subtensor_utility.hpp @@ -12,8 +12,8 @@ /// \file subtensor_utility.hpp Definition for the tensor template class -#ifndef _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ -#define _BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ +#ifndef BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ +#define BOOST_NUMERIC_UBLAS_TENSOR_SUBTENSOR_UTILITY_HPP_ #include "extents.hpp" #include "span.hpp" diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index 3dc87fcd9..74dc801e9 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -177,7 +177,7 @@ class tensor_core>> tensor_core& operator=(const_reference v) { for(auto i = 0u; i < this->size(); ++i) - this->at(i) = v; + this->at(i) = v; return *this; } diff --git 
a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index c40a2b16e..a027a5c93 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -176,7 +176,7 @@ class tensor_core> tensor_core& operator=(const_reference v) { for(auto i = 0u; i < this->size(); ++i) - this->at(i) = v; + this->at(i) = v; return *this; } diff --git a/test/tensor/test_subtensor.cpp b/test/tensor/test_subtensor.cpp index ab4f0131f..b49dffb52 100644 --- a/test/tensor/test_subtensor.cpp +++ b/test/tensor/test_subtensor.cpp @@ -105,7 +105,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE( subtensor_ctor2_test, value, test_types ) BOOST_CHECK( Asub.extents() == A.extents() ); BOOST_CHECK( Asub.data() == A.data() ); - auto Asubsub = subtensor_type( Asub ); + auto Asubsub = subtensor_type( Asub ); BOOST_CHECK( Asubsub.strides() == A.strides() ); BOOST_CHECK( Asubsub.extents() == A.extents() ); @@ -658,9 +658,9 @@ BOOST_FIXTURE_TEST_CASE_TEMPLATE( test_subtensor_read_write_multi_index_access_a auto s = subtensor_type(t); if (t.rank() == 1) check1(s); - else if(t.rank() == 2) check2(s); - else if(t.rank() == 3) check3(s); - else if(t.rank() == 4) check4(s); + else if(t.rank() == 2) check2(s); + else if(t.rank() == 3) check3(s); + else if(t.rank() == 4) check4(s); }; diff --git a/test/tensor/test_subtensor_utility.cpp b/test/tensor/test_subtensor_utility.cpp index fad2d4870..a711964ea 100644 --- a/test/tensor/test_subtensor_utility.cpp +++ b/test/tensor/test_subtensor_utility.cpp @@ -44,7 +44,7 @@ struct fixture_span { BOOST_FIXTURE_TEST_CASE( transform_span_test, fixture_span ) { - using namespace boost::numeric; + namespace ublas = boost::numeric::ublas; // template BOOST_CHECK( ublas::detail::transform_span(spans.at(0), std::size_t(3) ) == ublas::span(0,1,2) ); From c442b927dcb737e9f02072e9d9eb9ec50fcc3f23 Mon Sep 17 00:00:00 2001 
From: Kannav Mehta Date: Mon, 30 Aug 2021 17:41:15 +0530 Subject: [PATCH 40/40] fix more clang-tidy warnings --- include/boost/numeric/ublas/tensor/access.hpp | 2 +- .../boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp | 5 ++--- .../numeric/ublas/tensor/tensor/subtensor_static_rank.hpp | 5 ++--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/include/boost/numeric/ublas/tensor/access.hpp b/include/boost/numeric/ublas/tensor/access.hpp index 5980aea04..202b47f0c 100644 --- a/include/boost/numeric/ublas/tensor/access.hpp +++ b/include/boost/numeric/ublas/tensor/access.hpp @@ -53,7 +53,7 @@ constexpr inline auto compute_single_index(InputIt1 i, InputIt1 ip, InputIt2 w) template constexpr inline auto compute_single_index(InputIt1 i, InputIt1 /*ip*/, InputIt2 w) { - if constexpr(p==0u) return 0ul; + if constexpr(p==0u) return 0ul; else if constexpr(p >1u) return compute_single_index(i,i,w)+i[p-1]*w[p-1]; else return i[p-1]*w[p-1]; } diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp index 74dc801e9..6675d68bb 100644 --- a/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_dynamic.hpp @@ -99,9 +99,8 @@ class tensor_core>> explicit tensor_core() = delete; - tensor_core(tensor_type& t) + explicit tensor_core(tensor_type& t) : tensor_expression_type{} - , _spans() , _extents(t.extents()) , _strides(t.strides()) , _span_strides(t.strides()) @@ -135,7 +134,7 @@ class tensor_core>> , _data (t._data) {} - tensor_core(tensor_core&& v) + tensor_core(tensor_core&& v) noexcept : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents)) diff --git a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp index a027a5c93..b5fa2a9c4 100644 --- 
a/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp +++ b/include/boost/numeric/ublas/tensor/tensor/subtensor_static_rank.hpp @@ -99,9 +99,8 @@ class tensor_core> explicit tensor_core() = delete; - tensor_core(tensor_type& t) + explicit tensor_core(tensor_type& t) : tensor_expression_type{} - , _spans() , _extents(t.extents()) , _strides(t.strides()) , _span_strides(t.strides()) @@ -135,7 +134,7 @@ class tensor_core> , _data (t._data) {} - tensor_core(tensor_core&& v) + tensor_core(tensor_core&& v) noexcept : tensor_expression_type{} , _spans (std::move(v._spans)) , _extents(std::move(v._extents))