From 5f1561bc280f41d698513fae281dda3a29c793d3 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Wed, 8 Jan 2025 16:27:04 +0300 Subject: [PATCH 01/24] Add layer examples --- app/layer_example/CMakeLists.txt | 9 ++++++ app/layer_example/ConvolutionLayer.cpp | 40 ++++++++++++++++++++++++++ app/layer_example/ElementwiseLayer.cpp | 31 ++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 app/layer_example/CMakeLists.txt create mode 100644 app/layer_example/ConvolutionLayer.cpp create mode 100644 app/layer_example/ElementwiseLayer.cpp diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt new file mode 100644 index 0000000..8eec088 --- /dev/null +++ b/app/layer_example/CMakeLists.txt @@ -0,0 +1,9 @@ +set(ARM_DIR "/3rdparty/ComputeLibrary") + +include_directories(${ARM_DIR}/include) +link_directories(${ARM_DIR}/build) + +add_executable(ConvolutionLayer ConvolutionLayer.cpp) +add_executable(ElementwiseLayer ElementwiseLayer.cpp) + +target_link_libraries(ConvolutionLayer arm_compute) \ No newline at end of file diff --git a/app/layer_example/ConvolutionLayer.cpp b/app/layer_example/ConvolutionLayer.cpp new file mode 100644 index 0000000..377b320 --- /dev/null +++ b/app/layer_example/ConvolutionLayer.cpp @@ -0,0 +1,40 @@ +#include "../ComputeLibrary/arm_compute/runtime/NEON/NEFunctions.h" +#include "../ComputeLibrary/utils/Utils.h" + +#include +using namespace arm_compute; +using namespace utils; + +int main(){ + Tensor input; + Tensor weight; + Tensor bias; + Tensor output; + + const unsigned int N = 1; + const unsigned int Hin = 3; + const unsigned int Win = 3; + const unsigned int Cin = 1; + + const unsigned int Hf = 3; + const unsigned int Wf = 3; + + const unsigned int Hout = Hin - Hf + 1; + const unsigned int Wout = Win - Wf + 1; + const unsigned int Cout = 1; + + input.allocator()->init(TensorInfo(TensorShape(Hin, Win, Cin), 1, DataType::F32)); + weight.allocator()->init(TensorInfo(TensorShape(Hf, Wf, Cin, Cout), 1, DataType::F32)); + output.allocator()->init(TensorInfo(TensorShape(Hout, Wout, Cout), 1, DataType::F32)); + + input.allocator()->allocate(); + weight.allocator()->allocate(); + output.allocator()->allocate(); + + NEConvolutionLayer conv; + conv.configure(&input, &weight, nullptr, &output, PadStrideInfo(1, 1, 0, 0)); + + conv.run(); + + output.print(std::cout); +} \ No newline at end of file diff --git a/app/layer_example/ElementwiseLayer.cpp b/app/layer_example/ElementwiseLayer.cpp new file mode 100644 index 0000000..eeeb30d --- /dev/null +++ b/app/layer_example/ElementwiseLayer.cpp @@ -0,0 +1,31 @@ +#include "../ComputeLibrary/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" +#include "../ComputeLibrary/utils/Utils.h" + +#include +using namespace arm_compute; +using namespace utils; + +int main() { + const int input_width = 5; + const int input_height = 5; + + Tensor input1, input2, output; + + input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + + input1.allocator()->allocate(); + input2.allocator()->allocate(); + output.allocator()->allocate(); + + fill_random_tensor(input1, 0.f, 1.f); + fill_random_tensor(input2, 0.f, 1.f); + + NEElementwiseSquaredDiff elementwise; + elementwise.configure(&input1, &input2, &output); + + elementwise.run(); + + output.print(std::cout); +} From 
58a016c3d4a0e8acb7d0363fd8ef0f4ffe0512fd Mon Sep 17 00:00:00 2001 From: chekalexey Date: Wed, 8 Jan 2025 18:09:40 +0300 Subject: [PATCH 02/24] Add layer examples --- app/CMakeLists.txt | 1 + app/example/CMakeLists.txt | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/app/CMakeLists.txt b/app/CMakeLists.txt index a5c4dce..b704bc8 100644 --- a/app/CMakeLists.txt +++ b/app/CMakeLists.txt @@ -1 +1,2 @@ add_subdirectory(example) +add_subdirectory(layer_example) \ No newline at end of file diff --git a/app/example/CMakeLists.txt b/app/example/CMakeLists.txt index 033d285..5ac1ad8 100644 --- a/app/example/CMakeLists.txt +++ b/app/example/CMakeLists.txt @@ -1 +1 @@ -add_executable(example main.cpp) +add_executable(example main.cpp) \ No newline at end of file From a19ca04da31733cfc523d07c51d320fe70aeebbf Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 11 Feb 2025 21:28:24 +0300 Subject: [PATCH 03/24] fixed --- app/layer_example/CMakeLists.txt | 7 ++++--- app/layer_example/ConvolutionLayer.cpp | 4 ++-- app/layer_example/ElementwiseLayer.cpp | 4 ++-- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index 8eec088..b89ef42 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,9 +1,10 @@ -set(ARM_DIR "/3rdparty/ComputeLibrary") +set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -include_directories(${ARM_DIR}/include) +include_directories(${ARM_DIR}) link_directories(${ARM_DIR}/build) add_executable(ConvolutionLayer ConvolutionLayer.cpp) add_executable(ElementwiseLayer ElementwiseLayer.cpp) -target_link_libraries(ConvolutionLayer arm_compute) \ No newline at end of file +target_link_libraries(ConvolutionLayer arm_compute) +target_link_libraries(ElementwiseLayer arm_compute) \ No newline at end of file diff --git a/app/layer_example/ConvolutionLayer.cpp b/app/layer_example/ConvolutionLayer.cpp index 377b320..618629b 100644 --- a/app/layer_example/ConvolutionLayer.cpp +++ b/app/layer_example/ConvolutionLayer.cpp @@ -1,5 +1,5 @@ -#include "../ComputeLibrary/arm_compute/runtime/NEON/NEFunctions.h" -#include "../ComputeLibrary/utils/Utils.h" +#include "ComputeLibrary/arm_compute/runtime/NEON/NEFunctions.h" +#include "ComputeLibrary/utils/Utils.h" #include using namespace arm_compute; diff --git a/app/layer_example/ElementwiseLayer.cpp b/app/layer_example/ElementwiseLayer.cpp index eeeb30d..3522621 100644 --- a/app/layer_example/ElementwiseLayer.cpp +++ b/app/layer_example/ElementwiseLayer.cpp @@ -1,5 +1,5 @@ -#include "../ComputeLibrary/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" -#include "../ComputeLibrary/utils/Utils.h" +#include "ComputeLibrary/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" +#include "ComputeLibrary/utils/Utils.h" #include using namespace arm_compute; From 6f75b736bfd71e6d0379768a2f6e70a81d0f48ef Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Feb 2025 12:25:52 +0300 Subject: [PATCH 04/24] correction of inclusions --- app/layer_example/CMakeLists.txt | 6 +++--- app/layer_example/ConvolutionLayer.cpp | 4 ++-- app/layer_example/ElementwiseLayer.cpp | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index b89ef42..f51b953 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,10 +1,10 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -include_directories(${ARM_DIR}) 
-link_directories(${ARM_DIR}/build) - add_executable(ConvolutionLayer ConvolutionLayer.cpp) add_executable(ElementwiseLayer ElementwiseLayer.cpp) +include_directories(${ARM_DIR}) +link_directories(${ARM_DIR}/build) + target_link_libraries(ConvolutionLayer arm_compute) target_link_libraries(ElementwiseLayer arm_compute) \ No newline at end of file diff --git a/app/layer_example/ConvolutionLayer.cpp b/app/layer_example/ConvolutionLayer.cpp index 618629b..60ed50a 100644 --- a/app/layer_example/ConvolutionLayer.cpp +++ b/app/layer_example/ConvolutionLayer.cpp @@ -1,5 +1,5 @@ -#include "ComputeLibrary/arm_compute/runtime/NEON/NEFunctions.h" -#include "ComputeLibrary/utils/Utils.h" +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" #include using namespace arm_compute; diff --git a/app/layer_example/ElementwiseLayer.cpp b/app/layer_example/ElementwiseLayer.cpp index 3522621..05a60f6 100644 --- a/app/layer_example/ElementwiseLayer.cpp +++ b/app/layer_example/ElementwiseLayer.cpp @@ -1,5 +1,5 @@ -#include "ComputeLibrary/arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" -#include "ComputeLibrary/utils/Utils.h" +#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" +#include "utils/Utils.h" #include using namespace arm_compute; From 968c92a790760926332827307f2101d4359fb3ae Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Feb 2025 16:21:25 +0300 Subject: [PATCH 05/24] Update cmakelist --- app/layer_example/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index f51b953..5e80638 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -4,6 +4,7 @@ add_executable(ConvolutionLayer ConvolutionLayer.cpp) add_executable(ElementwiseLayer ElementwiseLayer.cpp) include_directories(${ARM_DIR}) +include_directories(${ARM_DIR/include}) link_directories(${ARM_DIR}/build) target_link_libraries(ConvolutionLayer arm_compute) From 7c87d3d8b04ab5c3000cbb0f6fe79246a343ab19 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 11 Mar 2025 15:19:27 +0300 Subject: [PATCH 06/24] Add a ElementwiseLayer Example --- app/layer_example/ElementwiseLayer.cpp | 73 +++++++++++++++++++++----- 1 file changed, 59 insertions(+), 14 deletions(-) diff --git a/app/layer_example/ElementwiseLayer.cpp b/app/layer_example/ElementwiseLayer.cpp index 05a60f6..6ac7794 100644 --- a/app/layer_example/ElementwiseLayer.cpp +++ b/app/layer_example/ElementwiseLayer.cpp @@ -1,31 +1,76 @@ -#include "arm_compute/runtime/NEON/functions/NEElementwiseOperations.h" +#include "arm_compute/runtime/NEON/NEFunctions.h" #include "utils/Utils.h" #include using namespace arm_compute; using namespace utils; -int main() { +class ElementwiseLayer { const int input_width = 5; const int input_height = 5; Tensor input1, input2, output; - input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); +public: + void fill() { + input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + + input1.allocator()->allocate(); + 
input2.allocator()->allocate(); + output.allocator()->allocate(); - input1.allocator()->allocate(); - input2.allocator()->allocate(); - output.allocator()->allocate(); + fill_random_tensor(input1, 0.f, 1.f); + fill_random_tensor(input2, 0.f, 1.f); + } - fill_random_tensor(input1, 0.f, 1.f); - fill_random_tensor(input2, 0.f, 1.f); + void SquaredDiff() { + NEElementwiseSquaredDiff elementwise; + elementwise.configure(&input1, &input2, &output); + elementwise.run(); + } + + void Division() { + NEElementwiseDivision elementwise; + elementwise.configure(&input1, &input2, &output); + elementwise.run(); + } + + void Addition() { + NEArithmeticAddition add; + add.configure(&input1, &input2, &output); + add.run(); + } + + void Swish() { + NEActivationLayer act; + act.configure(&input1, &input2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH)); + act.run(); + } + + void Abs() { + NEActivationLayer act; + act.configure(&input1, &input2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS)); + act.run(); + } - NEElementwiseSquaredDiff elementwise; - elementwise.configure(&input1, &input2, &output); + void Sigmoid() { + NEActivationLayer act; + act.configure(&input1, &input2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); + act.run(); + } - elementwise.run(); + void print() { + output.print(std::cout); + } +}; + +int main() { + ElementwiseLayer a; + a.fill(); + a.Addition(); + a.print(); - output.print(std::cout); + return 0; } From 9bfa382fe3cd43b60ea718e1d0cb3ed23aec2033 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 18 Mar 2025 12:47:59 +0300 Subject: [PATCH 07/24] Add a pooling layer example --- app/layer_example/PoolingLayer.cpp | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 app/layer_example/PoolingLayer.cpp diff --git a/app/layer_example/PoolingLayer.cpp b/app/layer_example/PoolingLayer.cpp new file mode 100644 index 0000000..885e63f --- /dev/null +++ b/app/layer_example/PoolingLayer.cpp @@ -0,0 +1,28 @@ +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" + +#include +using namespace arm_compute; +using namespace utils; + +int main() { + Tensor input; + Tensor output; + + const int input_width = 5; + const int input_height = 5; + + input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + fill_random_tensor(input, 0.f, 1.f); + + NEPoolingLayer pool; + pool.configure(&input, &output, PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC)); + pool.run(); + + output.print(std::cout); +} \ No newline at end of file From f02551d3b7f32c152f8974c842620f57c5739cd0 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Mar 2025 15:22:17 +0300 Subject: [PATCH 08/24] add a MatmulLayer example --- app/layer_example/MatMulLayer.cpp | 32 +++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 app/layer_example/MatMulLayer.cpp diff --git a/app/layer_example/MatMulLayer.cpp b/app/layer_example/MatMulLayer.cpp new file mode 100644 index 0000000..8844df8 --- /dev/null +++ b/app/layer_example/MatMulLayer.cpp @@ -0,0 +1,32 @@ +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" + +#include +using namespace arm_compute; +using namespace utils; + +int main() { + Tensor input1; + Tensor input2; + Tensor output; + + const int 
input_width = 5; + const int input_height = 5; + + input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + + input1.allocator()->allocate(); + input2.allocator()->allocate(); + output.allocator()->allocate(); + + fill_random_tensor(input1, 0.f, 1.f); + fill_random_tensor(input2, 0.f, 1.f); + + NEMatMulLayer m; + m.configure(&input1, &input2, &output); + m.run(); + + output.print(std::cout); +} \ No newline at end of file From f81c4bc62be23699fc3e7f1684debdc53ffec58c Mon Sep 17 00:00:00 2001 From: ChekAlexey <144238010+chekalexey@users.noreply.github.com> Date: Tue, 25 Mar 2025 15:39:20 +0300 Subject: [PATCH 09/24] Delete ElementwiseLayer.cpp --- app/layer_example/ElementwiseLayer.cpp | 76 -------------------------- 1 file changed, 76 deletions(-) delete mode 100644 app/layer_example/ElementwiseLayer.cpp diff --git a/app/layer_example/ElementwiseLayer.cpp b/app/layer_example/ElementwiseLayer.cpp deleted file mode 100644 index 6ac7794..0000000 --- a/app/layer_example/ElementwiseLayer.cpp +++ /dev/null @@ -1,76 +0,0 @@ -#include "arm_compute/runtime/NEON/NEFunctions.h" -#include "utils/Utils.h" - -#include -using namespace arm_compute; -using namespace utils; - -class ElementwiseLayer { - const int input_width = 5; - const int input_height = 5; - - Tensor input1, input2, output; - -public: - void fill() { - input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - - input1.allocator()->allocate(); - input2.allocator()->allocate(); - output.allocator()->allocate(); - - fill_random_tensor(input1, 0.f, 1.f); - fill_random_tensor(input2, 0.f, 1.f); - } - - void SquaredDiff() { - NEElementwiseSquaredDiff elementwise; - elementwise.configure(&input1, &input2, &output); - elementwise.run(); - } - - void Division() { - NEElementwiseDivision elementwise; - elementwise.configure(&input1, &input2, &output); - elementwise.run(); - } - - void Addition() { - NEArithmeticAddition add; - add.configure(&input1, &input2, &output); - add.run(); - } - - void Swish() { - NEActivationLayer act; - act.configure(&input1, &input2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH)); - act.run(); - } - - void Abs() { - NEActivationLayer act; - act.configure(&input1, &input2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS)); - act.run(); - } - - void Sigmoid() { - NEActivationLayer act; - act.configure(&input1, &input2, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); - act.run(); - } - - void print() { - output.print(std::cout); - } -}; - -int main() { - ElementwiseLayer a; - a.fill(); - a.Addition(); - a.print(); - - return 0; -} From 87764d6bc232955d5f0b60c8c957cacd932f4ff0 Mon Sep 17 00:00:00 2001 From: ChekAlexey <144238010+chekalexey@users.noreply.github.com> Date: Tue, 25 Mar 2025 15:39:33 +0300 Subject: [PATCH 10/24] Delete ConvolutionLayer.cpp --- app/layer_example/ConvolutionLayer.cpp | 40 -------------------------- 1 file changed, 40 deletions(-) delete mode 100644 app/layer_example/ConvolutionLayer.cpp diff --git a/app/layer_example/ConvolutionLayer.cpp 
b/app/layer_example/ConvolutionLayer.cpp deleted file mode 100644 index 60ed50a..0000000 --- a/app/layer_example/ConvolutionLayer.cpp +++ /dev/null @@ -1,40 +0,0 @@ -#include "arm_compute/runtime/NEON/NEFunctions.h" -#include "utils/Utils.h" - -#include -using namespace arm_compute; -using namespace utils; - -int main(){ - Tensor input; - Tensor weight; - Tensor bias; - Tensor output; - - const unsigned int N = 1; - const unsigned int Hin = 3; - const unsigned int Win = 3; - const unsigned int Cin = 1; - - const unsigned int Hf = 3; - const unsigned int Wf = 3; - - const unsigned int Hout = Hin - Hf + 1; - const unsigned int Wout = Win - Wf + 1; - const unsigned int Cout = 1; - - input.allocator()->init(TensorInfo(TensorShape(Hin, Win, Cin), 1, DataType::F32)); - weight.allocator()->init(TensorInfo(TensorShape(Hf, Wf, Cin, Cout), 1, DataType::F32)); - output.allocator()->init(TensorInfo(TensorShape(Hout, Wout, Cout), 1, DataType::F32)); - - input.allocator()->allocate(); - weight.allocator()->allocate(); - output.allocator()->allocate(); - - NEConvolutionLayer conv; - conv.configure(&input, &weight, nullptr, &output, PadStrideInfo(1, 1, 0, 0)); - - conv.run(); - - output.print(std::cout); -} \ No newline at end of file From 2561b5eea8d966626e7060c6ccc944bf64cf2c16 Mon Sep 17 00:00:00 2001 From: ChekAlexey <144238010+chekalexey@users.noreply.github.com> Date: Tue, 25 Mar 2025 15:39:48 +0300 Subject: [PATCH 11/24] Delete PoolingLayer.cpp --- app/layer_example/PoolingLayer.cpp | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 app/layer_example/PoolingLayer.cpp diff --git a/app/layer_example/PoolingLayer.cpp b/app/layer_example/PoolingLayer.cpp deleted file mode 100644 index 885e63f..0000000 --- a/app/layer_example/PoolingLayer.cpp +++ /dev/null @@ -1,28 +0,0 @@ -#include "arm_compute/runtime/NEON/NEFunctions.h" -#include "utils/Utils.h" - -#include -using namespace arm_compute; -using namespace utils; - -int main() { - Tensor input; - Tensor output; - - const int input_width = 5; - const int input_height = 5; - - input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - fill_random_tensor(input, 0.f, 1.f); - - NEPoolingLayer pool; - pool.configure(&input, &output, PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC)); - pool.run(); - - output.print(std::cout); -} \ No newline at end of file From daaa4729c0837dc5b4c01bfca7b5fc974f4ee1fc Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Mar 2025 15:43:42 +0300 Subject: [PATCH 12/24] Update CMakeLists --- app/layer_example/CMakeLists.txt | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index 5e80638..3864154 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,11 +1,9 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -add_executable(ConvolutionLayer ConvolutionLayer.cpp) -add_executable(ElementwiseLayer ElementwiseLayer.cpp) +add_executable(MatMul MatMulLayerLayer.cpp) include_directories(${ARM_DIR}) -include_directories(${ARM_DIR/include}) +include_directories(${ARM_DIR}/include) link_directories(${ARM_DIR}/build) -target_link_libraries(ConvolutionLayer arm_compute) -target_link_libraries(ElementwiseLayer arm_compute) \ No newline at end of file 
+target_link_libraries(MatMul arm_compute) From aa8d4f1f44139891c9153c6e17a0e3c6cba2eb2d Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Mar 2025 15:49:21 +0300 Subject: [PATCH 13/24] Update CMakeLists --- app/layer_example/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index 3864154..23d91c1 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -6,4 +6,4 @@ include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) link_directories(${ARM_DIR}/build) -target_link_libraries(MatMul arm_compute) +target_link_libraries(MatMul arm_compute) \ No newline at end of file From 2e2fe442c71d5a62ac423f0907b63a9279efbfbc Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Mar 2025 17:15:03 +0300 Subject: [PATCH 14/24] Update Cmakelist --- app/layer_example/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index 23d91c1..d7aa276 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,6 +1,6 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -add_executable(MatMul MatMulLayerLayer.cpp) +add_executable(MatMul MatMulLayer.cpp) include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) From e874928a9b37e3bf08119c9e119a94aa12d7f558 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 25 Mar 2025 17:43:19 +0300 Subject: [PATCH 15/24] correction --- app/layer_example/MatMulLayer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/layer_example/MatMulLayer.cpp b/app/layer_example/MatMulLayer.cpp index 8844df8..ce50647 100644 --- a/app/layer_example/MatMulLayer.cpp +++ b/app/layer_example/MatMulLayer.cpp @@ -24,8 +24,8 @@ int main() { fill_random_tensor(input1, 0.f, 1.f); fill_random_tensor(input2, 0.f, 1.f); - NEMatMulLayer m; - m.configure(&input1, &input2, &output); + NEMatMul m; + m.configure(&input1, &input2, &output, MatMulInfo(), CpuMatMulSettings(), ActivationLayerInfo()); m.run(); output.print(std::cout); From 0cd542590d3bc4dc6dc6768044c6cae6ac0ab54a Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 15 Apr 2025 11:33:25 +0300 Subject: [PATCH 16/24] Update CMakeLists --- app/layer_example/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index d7aa276..843f187 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -4,6 +4,6 @@ add_executable(MatMul MatMulLayer.cpp) include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) -link_directories(${ARM_DIR}/build) +target_link_directories(MatMul PUBLIC ${ARM_DIR}/build) target_link_libraries(MatMul arm_compute) \ No newline at end of file From d7cf92693c776804175f81fad8f7ddcdef4ac740 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 22 Apr 2025 15:11:09 +0300 Subject: [PATCH 17/24] Add dependencies in CMakeLists --- app/layer_example/CMakeLists.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index 843f187..c71fbd3 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -6,4 +6,6 @@ include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) target_link_directories(MatMul PUBLIC ${ARM_DIR}/build) -target_link_libraries(MatMul arm_compute) \ 
No newline at end of file +target_link_libraries(MatMul arm_compute) + +add_dependencies(MatMul build_compute_library) \ No newline at end of file From 51b0a089f285c3a35390dce2796e24ce9fbac292 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 29 Apr 2025 17:39:48 +0300 Subject: [PATCH 18/24] Add a Reshape Layer example --- app/layer_example/CMakeLists.txt | 8 ++++---- app/layer_example/MatMulLayer.cpp | 32 ------------------------------ app/layer_example/ReshapeLayer.cpp | 29 +++++++++++++++++++++++++++ 3 files changed, 33 insertions(+), 36 deletions(-) delete mode 100644 app/layer_example/MatMulLayer.cpp create mode 100644 app/layer_example/ReshapeLayer.cpp diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index c71fbd3..da0972f 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,11 +1,11 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -add_executable(MatMul MatMulLayer.cpp) +add_executable(Reshape ReshapeLayer.cpp) include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) -target_link_directories(MatMul PUBLIC ${ARM_DIR}/build) +target_link_directories(Reshape PUBLIC ${ARM_DIR}/build) -target_link_libraries(MatMul arm_compute) +target_link_libraries(Reshape arm_compute) -add_dependencies(MatMul build_compute_library) \ No newline at end of file +add_dependencies(Reshape build_compute_library) \ No newline at end of file diff --git a/app/layer_example/MatMulLayer.cpp b/app/layer_example/MatMulLayer.cpp deleted file mode 100644 index ce50647..0000000 --- a/app/layer_example/MatMulLayer.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include "arm_compute/runtime/NEON/NEFunctions.h" -#include "utils/Utils.h" - -#include -using namespace arm_compute; -using namespace utils; - -int main() { - Tensor input1; - Tensor input2; - Tensor output; - - const int input_width = 5; - const int input_height = 5; - - input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - - input1.allocator()->allocate(); - input2.allocator()->allocate(); - output.allocator()->allocate(); - - fill_random_tensor(input1, 0.f, 1.f); - fill_random_tensor(input2, 0.f, 1.f); - - NEMatMul m; - m.configure(&input1, &input2, &output, MatMulInfo(), CpuMatMulSettings(), ActivationLayerInfo()); - m.run(); - - output.print(std::cout); -} \ No newline at end of file diff --git a/app/layer_example/ReshapeLayer.cpp b/app/layer_example/ReshapeLayer.cpp new file mode 100644 index 0000000..218de87 --- /dev/null +++ b/app/layer_example/ReshapeLayer.cpp @@ -0,0 +1,29 @@ +#include +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" + +using namespace arm_compute; +using namespace utils; + +int main() { + Tensor input; + Tensor output; + + const int input_width = 3; + const int input_height = 3; + + input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + fill_random_tensor(input, 0.f, 1.f); + + NEReshapeLayer reshape; + reshape.configure(&input, &output); + + reshape.run(); + + output.print(std::cout); +} \ No newline at end of file From 3bab99a34d1cc0a9724064b207b38f2bffd773ed 
Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 29 Apr 2025 17:54:01 +0300 Subject: [PATCH 19/24] Add a Slice Layer example --- app/layer_example/CMakeLists.txt | 8 ++++---- app/layer_example/{ReshapeLayer.cpp => SliceLayer.cpp} | 9 ++++++--- 2 files changed, 10 insertions(+), 7 deletions(-) rename app/layer_example/{ReshapeLayer.cpp => SliceLayer.cpp} (73%) diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index da0972f..c40b428 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,11 +1,11 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -add_executable(Reshape ReshapeLayer.cpp) +add_executable(Slice SliceLayer.cpp) include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) -target_link_directories(Reshape PUBLIC ${ARM_DIR}/build) +target_link_directories(Slice PUBLIC ${ARM_DIR}/build) -target_link_libraries(Reshape arm_compute) +target_link_libraries(Slice arm_compute) -add_dependencies(Reshape build_compute_library) \ No newline at end of file +add_dependencies(Slice build_compute_library) \ No newline at end of file diff --git a/app/layer_example/ReshapeLayer.cpp b/app/layer_example/SliceLayer.cpp similarity index 73% rename from app/layer_example/ReshapeLayer.cpp rename to app/layer_example/SliceLayer.cpp index 218de87..2b97cfd 100644 --- a/app/layer_example/ReshapeLayer.cpp +++ b/app/layer_example/SliceLayer.cpp @@ -12,6 +12,9 @@ int main() { const int input_width = 3; const int input_height = 3; + const Coordinates Starts = Coordinates(0, 0); + const Coordinates Ends = Coordinates(2, 0); + input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); @@ -20,10 +23,10 @@ int main() { fill_random_tensor(input, 0.f, 1.f); - NEReshapeLayer reshape; - reshape.configure(&input, &output); + NESlice slice; + slice.configure(&input, &output, Starts, Ends); - reshape.run(); + slice.run(); output.print(std::cout); } \ No newline at end of file From fa78bd3881fa333c5c02072442300257a4f4763f Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 6 May 2025 17:30:10 +0300 Subject: [PATCH 20/24] Added a split layer example --- app/layer_example/CMakeLists.txt | 8 ++++---- app/layer_example/SliceLayer.cpp | 32 ------------------------------ app/layer_example/SplitLayer.cpp | 34 ++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 36 deletions(-) delete mode 100644 app/layer_example/SliceLayer.cpp create mode 100644 app/layer_example/SplitLayer.cpp diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index c40b428..0dc37a2 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,11 +1,11 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -add_executable(Slice SliceLayer.cpp) +add_executable(Split SplitLayer.cpp) include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) -target_link_directories(Slice PUBLIC ${ARM_DIR}/build) +target_link_directories(Split PUBLIC ${ARM_DIR}/build) -target_link_libraries(Slice arm_compute) +target_link_libraries(Split arm_compute) -add_dependencies(Slice build_compute_library) \ No newline at end of file +add_dependencies(Split build_compute_library) \ No newline at end of file diff --git a/app/layer_example/SliceLayer.cpp b/app/layer_example/SliceLayer.cpp deleted file mode 100644 index 2b97cfd..0000000 --- 
a/app/layer_example/SliceLayer.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include -#include "arm_compute/runtime/NEON/NEFunctions.h" -#include "utils/Utils.h" - -using namespace arm_compute; -using namespace utils; - -int main() { - Tensor input; - Tensor output; - - const int input_width = 3; - const int input_height = 3; - - const Coordinates Starts = Coordinates(0, 0); - const Coordinates Ends = Coordinates(2, 0); - - input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - output.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - fill_random_tensor(input, 0.f, 1.f); - - NESlice slice; - slice.configure(&input, &output, Starts, Ends); - - slice.run(); - - output.print(std::cout); -} \ No newline at end of file diff --git a/app/layer_example/SplitLayer.cpp b/app/layer_example/SplitLayer.cpp new file mode 100644 index 0000000..d02fd24 --- /dev/null +++ b/app/layer_example/SplitLayer.cpp @@ -0,0 +1,34 @@ +#include +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" + +using namespace arm_compute; +using namespace utils; + +int main() { + Tensor input; + const int input_width = 3; + const int input_height = 3; + const int channels = 2; + const int axis = 2; + + input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, channels), 1, DataType::F32)); + input.allocator()->allocate(); + fill_random_tensor(input, 0.f, 1.f); + + Tensor output1, output2; + std::vector outputs = { &output1, &output2 }; + + NESplit split; + split.configure(&input, outputs, axis); + + output1.allocator()->allocate(); + output2.allocator()->allocate(); + + split.run(); + + output1.print(std::cout); + output2.print(std::cout); + + return 0; +} \ No newline at end of file From 07de634f0371728c88ac7e26eb7dd8bbe819761b Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 6 May 2025 17:36:28 +0300 Subject: [PATCH 21/24] Added a Concat Layer example --- app/layer_example/CMakeLists.txt | 8 +++---- app/layer_example/ConcatLayer.cpp | 36 +++++++++++++++++++++++++++++++ app/layer_example/SplitLayer.cpp | 34 ----------------------------- 3 files changed, 40 insertions(+), 38 deletions(-) create mode 100644 app/layer_example/ConcatLayer.cpp delete mode 100644 app/layer_example/SplitLayer.cpp diff --git a/app/layer_example/CMakeLists.txt b/app/layer_example/CMakeLists.txt index 0dc37a2..f47ced7 100644 --- a/app/layer_example/CMakeLists.txt +++ b/app/layer_example/CMakeLists.txt @@ -1,11 +1,11 @@ set(ARM_DIR "${CMAKE_SOURCE_DIR}/3rdparty/ComputeLibrary") -add_executable(Split SplitLayer.cpp) +add_executable(Concat ConcatLayer.cpp) include_directories(${ARM_DIR}) include_directories(${ARM_DIR}/include) -target_link_directories(Split PUBLIC ${ARM_DIR}/build) +target_link_directories(Concat PUBLIC ${ARM_DIR}/build) -target_link_libraries(Split arm_compute) +target_link_libraries(Concat arm_compute) -add_dependencies(Split build_compute_library) \ No newline at end of file +add_dependencies(Concat build_compute_library) \ No newline at end of file diff --git a/app/layer_example/ConcatLayer.cpp b/app/layer_example/ConcatLayer.cpp new file mode 100644 index 0000000..3ff90d7 --- /dev/null +++ b/app/layer_example/ConcatLayer.cpp @@ -0,0 +1,36 @@ +#include +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" + +using namespace arm_compute; +using namespace utils; + +int main() { + Tensor input1, input2; + Tensor output; + 
std::vector input; + + const int input_width = 3; + const int input_height = 3; + const int axis = 2; + + input1.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + input2.allocator()->init(TensorInfo(TensorShape(input_width, input_height, 1), 1, DataType::F32)); + + input1.allocator()->allocate(); + input2.allocator()->allocate(); + + fill_random_tensor(input1, 0.f, 1.f); + fill_random_tensor(input2, 0.f, 1.f); + + input.push_back(&input1); + input.push_back(&input2); + + NEConcatenateLayer concat; + concat.configure(input, &output, axis); + output.allocator()->allocate(); + + concat.run(); + + output.print(std::cout); +} \ No newline at end of file diff --git a/app/layer_example/SplitLayer.cpp b/app/layer_example/SplitLayer.cpp deleted file mode 100644 index d02fd24..0000000 --- a/app/layer_example/SplitLayer.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include -#include "arm_compute/runtime/NEON/NEFunctions.h" -#include "utils/Utils.h" - -using namespace arm_compute; -using namespace utils; - -int main() { - Tensor input; - const int input_width = 3; - const int input_height = 3; - const int channels = 2; - const int axis = 2; - - input.allocator()->init(TensorInfo(TensorShape(input_width, input_height, channels), 1, DataType::F32)); - input.allocator()->allocate(); - fill_random_tensor(input, 0.f, 1.f); - - Tensor output1, output2; - std::vector outputs = { &output1, &output2 }; - - NESplit split; - split.configure(&input, outputs, axis); - - output1.allocator()->allocate(); - output2.allocator()->allocate(); - - split.run(); - - output1.print(std::cout); - output2.print(std::cout); - - return 0; -} \ No newline at end of file From 349c8650b596dcb0269529fdf2fd95a79b6cf29f Mon Sep 17 00:00:00 2001 From: chekalexey Date: Sat, 17 May 2025 12:11:24 +0300 Subject: [PATCH 22/24] Implement Layer abstraction for all layers --- include/layer/layer.h | 36 +++++++++ src/layer/ConcatenateLayer.cpp | 54 +++++++++++++ src/layer/ConvLayer.cpp | 79 ++++++++++++++++++ src/layer/ElementwiseLayer.cpp | 142 +++++++++++++++++++++++++++++++++ src/layer/MatMulLayer.cpp | 56 +++++++++++++ src/layer/PoolingLayer.cpp | 55 +++++++++++++ src/layer/ReshapeLayer.cpp | 47 +++++++++++ src/layer/ResizeLayer.cpp | 57 +++++++++++++ src/layer/SliceLayer.cpp | 52 ++++++++++++ src/layer/SoftmaxLayer.cpp | 50 ++++++++++++ src/layer/SplitLayer.cpp | 46 +++++++++++ src/layer/TransposeLayer.cpp | 51 ++++++++++++ src/layer/layer.cpp | 13 +++ 13 files changed, 738 insertions(+) create mode 100644 include/layer/layer.h create mode 100644 src/layer/ConcatenateLayer.cpp create mode 100644 src/layer/ConvLayer.cpp create mode 100644 src/layer/ElementwiseLayer.cpp create mode 100644 src/layer/MatMulLayer.cpp create mode 100644 src/layer/PoolingLayer.cpp create mode 100644 src/layer/ReshapeLayer.cpp create mode 100644 src/layer/ResizeLayer.cpp create mode 100644 src/layer/SliceLayer.cpp create mode 100644 src/layer/SoftmaxLayer.cpp create mode 100644 src/layer/SplitLayer.cpp create mode 100644 src/layer/TransposeLayer.cpp create mode 100644 src/layer/layer.cpp diff --git a/include/layer/layer.h b/include/layer/layer.h new file mode 100644 index 0000000..c01a83d --- /dev/null +++ b/include/layer/layer.h @@ -0,0 +1,36 @@ +#ifndef LAYER_H +#define LAYER_H + +#include + +#include "arm_compute/runtime/NEON/NEFunctions.h" +#include "utils/Utils.h" + +using namespace arm_compute; +using namespace utils; + +struct LayerAttributes { + int id = -1; +}; + +class Layer { + protected: + int id_; + + public: + 
Layer() = default; + explicit Layer(const LayerAttributes& attrs) : id_(attrs.id) {} + virtual ~Layer() = default; + void setID(int id) { id_ = id; } + int getID() const { return id_; } + virtual std::string getInfoString() const; + virtual void exec(Tensor& input, Tensor& output) = 0; + virtual void exec(Tensor& input1, Tensor& input2, Tensor& output) = 0; + //virtual Shape get_output_shape() = 0; + + virtual std::string get_type_name() const = 0; + void addNeighbor(Layer* neighbor); + void removeNeighbor(Layer* neighbor); + std::list neighbors_; +}; +#endif \ No newline at end of file diff --git a/src/layer/ConcatenateLayer.cpp b/src/layer/ConcatenateLayer.cpp new file mode 100644 index 0000000..db08b16 --- /dev/null +++ b/src/layer/ConcatenateLayer.cpp @@ -0,0 +1,54 @@ +#ifndef ACL_CONCATENATE_LAYER_H +#define ACL_CONCATENATE_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +class ConcatenateLayer : public Layer { + private: + std::vector input_shapes_config_; + TensorShape output_shape_; + unsigned int concatenation_axis_; + bool configured_ = false; + + public: + ConcatenateLayer(int id) { setID(id); } + + void configure(const std::vector& inputs_shapes, unsigned int axis, TensorShape& output_shape_ref) { + if (inputs_shapes.empty()) { + throw std::runtime_error("Concat: Input shapes list cannot be empty."); + } + + input_shapes_config_ = inputs_shapes; + concatenation_axis_ = axis; + output_shape_ = output_shape_ref; + configured_ = true; + } + + void exec(std::vector& input, Tensor& output) { + if (!configured_) { + throw std::runtime_error("ConcatenateLayer: Layer not configured."); + } + if (input.size() != input_shapes_config_.size()) { + throw std::runtime_error("ConcatenateLayer: different sizes of vectors."); + } + + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + NEConcatenateLayer concat; + concat.configure(input, &output, concatenation_axis_); + output.allocator()->allocate(); + + concat.run(); + } + + std::string get_type_name() const override { + return "ConcatenateLayer"; + } +}; + +#endif \ No newline at end of file diff --git a/src/layer/ConvLayer.cpp b/src/layer/ConvLayer.cpp new file mode 100644 index 0000000..d53578d --- /dev/null +++ b/src/layer/ConvLayer.cpp @@ -0,0 +1,79 @@ +#ifndef ACL_CONVOLUTION_LAYER_SIMPLIFIED_H +#define ACL_CONVOLUTION_LAYER_SIMPLIFIED_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +class ConvolutionLayer : public Layer { +private: + + TensorShape input_shape_; + TensorShape weights_shape_; + TensorShape biases_shape_; + TensorShape output_shape_; + Tensor* biase_t; + Tensor* weight_t; + PadStrideInfo psi; + + bool configured_ = false; + +public: + ConvolutionLayer(int id) { setID(id); } + + void configure( + const TensorShape& input_s, + const TensorShape& weights_s, + Tensor& weights_t, + const TensorShape& biases_s, + Tensor& biases_t, + TensorShape& output_s_ref, + const PadStrideInfo& info + ) { + + input_shape_ = input_s; + weights_shape_ = weights_s; + biases_shape_ = biases_s; + psi = info; + output_shape_ = output_s_ref; + + NECopy copyb, copyw; + copyb.configure(biase_t, &biases_t); + copyb.run(); + copyw.configure(weight_t, &weights_t); + copyw.run(); + + weight_t->allocator()->init(TensorInfo(weights_shape_, 1, DataType::F32)); + biase_t->allocator()->init(TensorInfo(biases_shape_, 1, DataType::F32)); + + weight_t->allocator()->allocate(); + biase_t->allocator()->allocate(); + + configured_ = true; + } + + void exec(Tensor& 
input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error("ConvolutionLayer: Layer not configured."); + } + + input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NEConvolutionLayer conv; + conv.configure(&input, weight_t, biase_t, &output, psi); + conv.run(); + } + + std::string get_type_name() const override { + return "ConvolutionLayer"; + } +}; + +#endif \ No newline at end of file diff --git a/src/layer/ElementwiseLayer.cpp b/src/layer/ElementwiseLayer.cpp new file mode 100644 index 0000000..dd13ccd --- /dev/null +++ b/src/layer/ElementwiseLayer.cpp @@ -0,0 +1,142 @@ +#ifndef ACL_ELEMENTWISE_LAYER_H +#define ACL_ELEMENTWISE_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +using namespace arm_compute; +using namespace utils; + +enum class ElementwiseOp { + ADD, + DIV, + ABS, + SIGM, + SWISH, + SQUARED_DIFF +}; + +class ElementwiseLayer : public Layer { +private: + ElementwiseOp op_type_; + TensorShape input1_shape, input2_shape; + TensorShape output_shape; + bool configured_ = false; + +public: + ElementwiseLayer(int id, ElementwiseOp op) : op_type_(op) { setID(id); } + + ElementwiseLayer() : ElementwiseLayer(0, ElementwiseOp::ADD) {} + + void configure(const TensorShape& input_shape, TensorShape& output_shape_) { + input1_shape = input_shape; + output_shape = input_shape; + configured_ = true; + } + + void configure(const TensorShape& input1_shape_, const TensorShape& input2_shape_, TensorShape& output_shape_) { + if (input1_shape.total_size() != input2_shape.total_size()) { + throw std::runtime_error( + "ElementwiseLayer: Input shapes must have same total size"); + } + + input1_shape = input1_shape_; + input2_shape = input2_shape_; + output_shape = output_shape_; + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error( + "ElementwiseLayer: Layer not configured before exec."); + } + + input.allocator()->init(TensorInfo(input1_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + switch (op_type_) { + case ElementwiseOp::ABS: { + NEActivationLayer abs; + abs.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS)); + abs.run(); + break; + } + case ElementwiseOp::SIGM: { + NEActivationLayer sigm; + sigm.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); + sigm.run(); + break; + } + case ElementwiseOp::SWISH: { + NEActivationLayer swish; + swish.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH)); + swish.run(); + break; + } + default: + throw std::runtime_error( + "ElementwiseLayer: This operation requires two inputs"); + } + } + + void exec(Tensor& input1, Tensor& input2, Tensor& output) { + if (!configured_) { + throw std::runtime_error( + "ElementwiseLayer: Layer not configured before exec."); + } + + input1.allocator()->init(TensorInfo(input1_shape, 1, DataType::F32)); + input2.allocator()->init(TensorInfo(input2_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + + input1.allocator()->allocate(); + input2.allocator()->allocate(); + output.allocator()->allocate(); + + switch (op_type_) { + case 
ElementwiseOp::ADD: { + NEArithmeticAddition add; + add.configure(&input1, &input2, &output, ConvertPolicy::WRAP); + add.run(); + break; + } + case ElementwiseOp::DIV: { + NEElementwiseDivision div; + div.configure(&input1, &input2, &output); + div.run(); + break; + } + case ElementwiseOp::SQUARED_DIFF: { + NEElementwiseSquaredDiff sqdiff; + sqdiff.configure(&input1, &input2, &output); + sqdiff.run(); + break; + } + default: + throw std::runtime_error( + "ElementwiseLayer: This operation requires single input"); + } + } + + std::string get_type_name() const override { + switch (op_type_) { + case ElementwiseOp::ADD: return "ElementwiseAddLayer"; + case ElementwiseOp::DIV: return "ElementwiseDivLayer"; + case ElementwiseOp::ABS: return "ElementwiseAbsLayer"; + case ElementwiseOp::SIGM: return "ElementwiseSigmoidLayer"; + case ElementwiseOp::SWISH: return "ElementwiseSwishLayer"; + case ElementwiseOp::SQUARED_DIFF: return "ElementwiseSquaredDiffLayer"; + default:return "ElementwiseUnknownLayer"; + } + } +}; + +#endif \ No newline at end of file diff --git a/src/layer/MatMulLayer.cpp b/src/layer/MatMulLayer.cpp new file mode 100644 index 0000000..6d096a6 --- /dev/null +++ b/src/layer/MatMulLayer.cpp @@ -0,0 +1,56 @@ +#ifndef ACL_MATMUL_LAYER_H +#define ACL_MATMUL_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +using namespace arm_compute; +using namespace utils; + +class MatMulLayer : public Layer { +private: + MatMulInfo matmul_info_; + TensorShape input_x_shape_; + TensorShape input_y_shape_; + TensorShape output_shape_; + bool configured_ = false; + +public: + MatMulLayer(int id, const MatMulInfo& info = MatMulInfo()) : matmul_info_(info) { + setID(id); + } + + void configure(TensorShape& input_x_shape, TensorShape& input_y_shape, TensorShape& output_shape_ref) { + input_x_shape_ = input_x_shape; + input_y_shape_ = input_y_shape; + output_shape_ = output_shape_ref; + + configured_ = true; + } + + void exec(Tensor& input_x, Tensor& input_y, Tensor& output) override { + if (!configured_) { + throw std::runtime_error("MatMulLayer: Layer not configured before exec."); + } + + input_x.allocator()->init(TensorInfo(input_x_shape_, 1, DataType::F32)); + input_y.allocator()->init(TensorInfo(input_y_shape_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input_x.allocator()->allocate(); + input_y.allocator()->allocate(); + output.allocator()->allocate(); + + NEMatMul m; + m.configure(&input_x, &input_y, &output, matmul_info_, CpuMatMulSettings(), ActivationLayerInfo()); + m.run(); + } + + std::string get_type_name() const override { return "MatMulLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/PoolingLayer.cpp b/src/layer/PoolingLayer.cpp new file mode 100644 index 0000000..831ce89 --- /dev/null +++ b/src/layer/PoolingLayer.cpp @@ -0,0 +1,55 @@ +#ifndef ACL_POOLING_LAYER_H +#define ACL_POOLING_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +class PoolingLayer : public Layer { +private: + PoolingLayerInfo pool_info_; + TensorShape input_shape_; + TensorShape output_shape_; + bool configured_ = false; + +public: + PoolingLayer(int id) { + setID(id); + } + + void configure(TensorShape& input_shape, PoolingLayerInfo pli = PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC), TensorShape& output_shape_ref) { + if (input_shape.num_dimensions() < 2) { + throw std::runtime_error("PoolingLayer: Input must be at least 2D"); + } + pool_info_ = pli; + 
input_shape_ = input_shape; + output_shape_ = input_shape; + + output_shape_ = output_shape_ref; + + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error("PoolingLayer: Layer not configured before exec."); + } + + input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NEPoolingLayer pool; + pool.configure(&input, &output, pool_info_); + pool.run(); + } + + std::string get_type_name() const override { return "PoolingLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/ReshapeLayer.cpp b/src/layer/ReshapeLayer.cpp new file mode 100644 index 0000000..b2b27a9 --- /dev/null +++ b/src/layer/ReshapeLayer.cpp @@ -0,0 +1,47 @@ +#ifndef ACL_RESHAPE_LAYER_H +#define ACL_RESHAPE_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +class ReshapeLayer : public Layer { +private: + TensorShape input_shape_config_; + TensorShape target_output_shape_config_; + bool configured_ = false; + +public: + ReshapeLayer(int id) { setID(id); } + + void configure(const TensorShape& input_shape, const TensorShape& target_output_shape, TensorShape& output_shape_ref) { + input_shape_config_ = input_shape; + target_output_shape_config_ = target_output_shape; + + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error("ReshapeLayer: Layer not configured."); + } + + input.allocator()->init(TensorInfo(input_shape_config_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(target_output_shape_config_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NEReshapeLayer reshape; + reshape.configure(&input, &output); + + reshape.run(); + } + + std::string get_type_name() const override { return "ReshapeLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/ResizeLayer.cpp b/src/layer/ResizeLayer.cpp new file mode 100644 index 0000000..75fb3f3 --- /dev/null +++ b/src/layer/ResizeLayer.cpp @@ -0,0 +1,57 @@ +#ifndef ACL_RESIZE_LAYER_H +#define ACL_RESIZE_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +using namespace arm_compute; +using namespace utils; + +class ResizeLayer : public Layer { +private: + TensorShape input_shape_; + TensorShape output_shape_; + bool configured_ = false; + +public: + ResizeLayer(int id) { setID(id); } + + void configure(TensorShape& input_shape, TensorShape& output_shape) { + input_shape_ = input_shape; + output_shape_ = output_shape; + + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error( + "ResizeLayer: Layer not configured before exec."); + } + + input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NEScale resize; + resize.configure(&input, &output, + ScaleKernelInfo{ + InterpolationPolicy::NEAREST_NEIGHBOR, + BorderMode::REPLICATE, + PixelValue(), + SamplingPolicy::CENTER, + }); + + resize.run(); + } + + std::string get_type_name() const override { return "ResizeLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/SliceLayer.cpp b/src/layer/SliceLayer.cpp new file mode 100644 
index 0000000..a1fe876 --- /dev/null +++ b/src/layer/SliceLayer.cpp @@ -0,0 +1,52 @@ +#ifndef ACL_SLICE_LAYER_H +#define ACL_SLICE_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +class SliceLayer : public Layer { + private: + TensorShape input_shape_config_; + TensorShape output_shape_; + Coordinates slice_starts_; + Coordinates slice_ends_; + bool configured_ = false; + + public: + SliceLayer(int id) { setID(id); } + + void configure(const TensorShape& input_shape, Coordinates starts, Coordinates ends, TensorShape& output_shape_ref) { + input_shape_config_ = input_shape; + slice_starts_ = starts; + slice_ends_ = ends; + output_shape_ = output_shape_ref; + + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error("SliceLayer: Layer not configured."); + } + + input.allocator()->init(TensorInfo(input_shape_config_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NESlice slice; + slice.configure(&input, &output, slice_starts_, slice_ends_); + + slice.run(); + } + + + std::string get_type_name() const override { return "SliceLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/SoftmaxLayer.cpp b/src/layer/SoftmaxLayer.cpp new file mode 100644 index 0000000..ed809e2 --- /dev/null +++ b/src/layer/SoftmaxLayer.cpp @@ -0,0 +1,50 @@ +#ifndef ACL_SOFTMAX_LAYER_H +#define ACL_SOFTMAX_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +using namespace arm_compute; +using namespace utils; + +class SoftmaxLayer : public Layer { +private: + TensorShape input_shape_; + TensorShape output_shape_; + bool configured_ = false; + +public: + SoftmaxLayer(int id) { setID(id); } + + void configure(TensorShape& input_shape, TensorShape& output_shape_ref) { + input_shape_ = input_shape; + output_shape_ = output_shape_ref; + + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error( + "SoftmaxLayer: Layer not configured before exec."); + } + + input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NESoftmaxLayer m; + m.configure(&input, &output); + m.run(); + } + + std::string get_type_name() const override { return "SoftmaxLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/SplitLayer.cpp b/src/layer/SplitLayer.cpp new file mode 100644 index 0000000..9d40906 --- /dev/null +++ b/src/layer/SplitLayer.cpp @@ -0,0 +1,46 @@ +#ifndef ACL_SPLIT_LAYER_H +#define ACL_SPLIT_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +class SplitLayer : public Layer { + private: + TensorShape input_shape_config_; + std::vector output_shapes_computed_; + unsigned int split_axis_; + + bool configured_ = false; + + public: + SplitLayer(int id) { setID(id); } + + void configure(const TensorShape& input_shape, unsigned int axis, TensorShape& first_output_shape_ref) { + input_shape_config_ = input_shape; + split_axis_ = axis; + + configured_ = true; + } + + void exec(Tensor& input, std::vector& outputs) { + if (!configured_) { + throw std::runtime_error("SplitLayer: Layer not configured."); + } + + input.allocator()->init(TensorInfo(input_shape_config_, 1, 
DataType::F32)); + input.allocator()->allocate(); + + NESplit split; + split.configure(&input, outputs, split_axis_); + + split.run(); + } + + std::string get_type_name() const override { return "SplitLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/TransposeLayer.cpp b/src/layer/TransposeLayer.cpp new file mode 100644 index 0000000..bfbec2a --- /dev/null +++ b/src/layer/TransposeLayer.cpp @@ -0,0 +1,51 @@ +#ifndef ACL_TRANSPOSE_LAYER_H +#define ACL_TRANSPOSE_LAYER_H + +#include +#include +#include +#include + +#include "include/layer/layer.h" + +using namespace arm_compute; +using namespace utils; + +class TransposeLayer : public Layer { +private: + TensorShape input_shape_; + TensorShape output_shape_; + bool configured_ = false; + +public: + TransposeLayer(int id) { + setID(id); + } + + void configure(TensorShape& input_shape, TensorShape& output_shape_ref) { + input_shape_ = input_shape; + output_shape_ = output_shape_ref; + + configured_ = true; + } + + void exec(Tensor& input, Tensor& output) override { + if (!configured_) { + throw std::runtime_error("TransposeLayer: Layer not configured before exec."); + } + + input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + + input.allocator()->allocate(); + output.allocator()->allocate(); + + NETranspose t; + t.configure(&input, &output); + t.run(); + } + + std::string get_type_name() const override { return "TransposeLayer"; } +}; + +#endif \ No newline at end of file diff --git a/src/layer/layer.cpp b/src/layer/layer.cpp new file mode 100644 index 0000000..ee9261b --- /dev/null +++ b/src/layer/layer.cpp @@ -0,0 +1,13 @@ +#include "include/layer/layer.h" + +void Layer::addNeighbor(Layer* neighbor) { + if (neighbor != nullptr) { + neighbors_.push_back(neighbor); + } +} + +void Layer::removeNeighbor(Layer* neighbor) { neighbors_.remove(neighbor); } + +std::string Layer::getInfoString() const { + return "Layer (ID: " + std::to_string(id_) + ")"; +} \ No newline at end of file From d0a66bdc7afd6e79828db78e24c8d22e6c4cf074 Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 22 Jul 2025 17:56:16 +0300 Subject: [PATCH 23/24] Update matmul and pool layers --- include/layer/layer.h | 1 + src/layer/MatMulLayer.cpp | 44 ++++++++++++++++---------------------- src/layer/PoolingLayer.cpp | 40 ++++++++++++++-------------------- 3 files changed, 35 insertions(+), 50 deletions(-) diff --git a/include/layer/layer.h b/include/layer/layer.h index c01a83d..cc4797f 100644 --- a/include/layer/layer.h +++ b/include/layer/layer.h @@ -26,6 +26,7 @@ class Layer { virtual std::string getInfoString() const; virtual void exec(Tensor& input, Tensor& output) = 0; virtual void exec(Tensor& input1, Tensor& input2, Tensor& output) = 0; + virtual void exec() = 0; //virtual Shape get_output_shape() = 0; virtual std::string get_type_name() const = 0; diff --git a/src/layer/MatMulLayer.cpp b/src/layer/MatMulLayer.cpp index 6d096a6..67bb4fa 100644 --- a/src/layer/MatMulLayer.cpp +++ b/src/layer/MatMulLayer.cpp @@ -13,44 +13,36 @@ using namespace utils; class MatMulLayer : public Layer { private: - MatMulInfo matmul_info_; - TensorShape input_x_shape_; - TensorShape input_y_shape_; - TensorShape output_shape_; + NEMatMul m; bool configured_ = false; public: - MatMulLayer(int id, const MatMulInfo& info = MatMulInfo()) : matmul_info_(info) { + MatMulLayer(int id){ setID(id); } - void configure(TensorShape& input_x_shape, TensorShape& input_y_shape, TensorShape& 
output_shape_ref) { - input_x_shape_ = input_x_shape; - input_y_shape_ = input_y_shape; - output_shape_ = output_shape_ref; + void configure(TensorShape& input_x_shape, TensorShape& input_y_shape, TensorShape& output_shape, + Tensor& input_x, Tensor& input_y, Tensor& output) { + + input_x.allocator()->init(TensorInfo(input_x_shape, 1, DataType::F32)); + input_y.allocator()->init(TensorInfo(input_y_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + input_x.allocator()->allocate(); + input_y.allocator()->allocate(); + output.allocator()->allocate(); + m.configure(&input_x, &input_y, &output, MatMulInfo(), CpuMatMulSettings(), ActivationLayerInfo()); configured_ = true; } - void exec(Tensor& input_x, Tensor& input_y, Tensor& output) override { - if (!configured_) { - throw std::runtime_error("MatMulLayer: Layer not configured before exec."); + void exec() override { + if (!configured_) { + throw std::runtime_error("MatMulLayer: Layer not configured before exec."); + } + m.run(); } - input_x.allocator()->init(TensorInfo(input_x_shape_, 1, DataType::F32)); - input_y.allocator()->init(TensorInfo(input_y_shape_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); - - input_x.allocator()->allocate(); - input_y.allocator()->allocate(); - output.allocator()->allocate(); - - NEMatMul m; - m.configure(&input_x, &input_y, &output, matmul_info_, CpuMatMulSettings(), ActivationLayerInfo()); - m.run(); - } - - std::string get_type_name() const override { return "MatMulLayer"; } + std::string get_type_name() const override { return "MatMulLayer"; } }; #endif \ No newline at end of file diff --git a/src/layer/PoolingLayer.cpp b/src/layer/PoolingLayer.cpp index 831ce89..906abf7 100644 --- a/src/layer/PoolingLayer.cpp +++ b/src/layer/PoolingLayer.cpp @@ -10,9 +10,7 @@ class PoolingLayer : public Layer { private: - PoolingLayerInfo pool_info_; - TensorShape input_shape_; - TensorShape output_shape_; + NEPoolingLayer pool; bool configured_ = false; public: @@ -20,36 +18,30 @@ class PoolingLayer : public Layer { setID(id); } - void configure(TensorShape& input_shape, PoolingLayerInfo pli = PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC), TensorShape& output_shape_ref) { + void configure(TensorShape& input_shape, + TensorShape& output_shape, Tensor& input, Tensor& output) { if (input_shape.num_dimensions() < 2) { throw std::runtime_error("PoolingLayer: Input must be at least 2D"); } - pool_info_ = pli; - input_shape_ = input_shape; - output_shape_ = input_shape; + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); - output_shape_ = output_shape_ref; + input.allocator()->allocate(); + output.allocator()->allocate(); - configured_ = true; - } + pool.configure(&input, &output, PoolingLayerInfo(PoolingType::MAX, DataLayout::NHWC)); - void exec(Tensor& input, Tensor& output) override { - if (!configured_) { - throw std::runtime_error("PoolingLayer: Layer not configured before exec."); + configured_ = true; } - input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - NEPoolingLayer pool; - pool.configure(&input, &output, pool_info_); - pool.run(); - } + void exec() override { + if (!configured_) { + throw std::runtime_error("PoolingLayer: Layer not configured before exec."); + } + 
pool.run(); + } - std::string get_type_name() const override { return "PoolingLayer"; } + std::string get_type_name() const override { return "PoolingLayer"; } }; #endif \ No newline at end of file From bcf45364f4eb811129dae76368b516cd5a6ece9b Mon Sep 17 00:00:00 2001 From: chekalexey Date: Tue, 29 Jul 2025 17:11:27 +0300 Subject: [PATCH 24/24] Update other layers --- src/layer/ConcatenateLayer.cpp | 70 ++++++++------- src/layer/ConvLayer.cpp | 80 +++++++----------- src/layer/ElementwiseLayer.cpp | 150 ++++++++++++++++----------------- src/layer/ReshapeLayer.cpp | 27 +++--- src/layer/ResizeLayer.cpp | 45 ++++------ src/layer/SliceLayer.cpp | 55 +++++------- src/layer/SoftmaxLayer.cpp | 27 +++--- src/layer/SplitLayer.cpp | 43 ++++------ src/layer/TransposeLayer.cpp | 24 ++---- 9 files changed, 224 insertions(+), 297 deletions(-) diff --git a/src/layer/ConcatenateLayer.cpp b/src/layer/ConcatenateLayer.cpp index db08b16..3d44103 100644 --- a/src/layer/ConcatenateLayer.cpp +++ b/src/layer/ConcatenateLayer.cpp @@ -9,46 +9,44 @@ #include "include/layer/layer.h" class ConcatenateLayer : public Layer { - private: - std::vector input_shapes_config_; - TensorShape output_shape_; - unsigned int concatenation_axis_; - bool configured_ = false; - - public: - ConcatenateLayer(int id) { setID(id); } - - void configure(const std::vector& inputs_shapes, unsigned int axis, TensorShape& output_shape_ref) { - if (inputs_shapes.empty()) { - throw std::runtime_error("Concat: Input shapes list cannot be empty."); +private: + NEConcatenateLayer concat; + bool configured_ = false; + +public: + ConcatenateLayer(int id) { setID(id); } + + void configure(const std::vector& inputs_shapes, unsigned int axis, TensorShape& output_shape, + std::vector& input, Tensor& output) { + + if (inputs_shapes.empty()) { + throw std::runtime_error("Concat: Input shapes list cannot be empty."); + } + if (inputs_shapes.size() != input.size()) { + throw std::runtime_error("Concat: vector size mismatch."); + } + std::vector inpcopy; + for (int i = 0; i < input.size(); i++) { + input[i]->allocator()->init(TensorInfo(inputs_shapes[i], 1, DataType::F32)); + input[i]->allocator()->allocate(); + inpcopy.push_back(input[i]); + } + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + concat.configure(inpcopy, &output, axis); + output.allocator()->allocate(); + configured_ = true; } - input_shapes_config_ = inputs_shapes; - concatenation_axis_ = axis; - output_shape_ = output_shape_ref; - configured_ = true; - } - - void exec(std::vector& input, Tensor& output) { - if (!configured_) { - throw std::runtime_error("ConcatenateLayer: Layer not configured."); + void exec() override { + if (!configured_) { + throw std::runtime_error("ConcatenateLayer: Layer not configured."); + } + concat.run(); } - if (input.size() != input_shapes_config_.size()) { - throw std::runtime_error("ConcatenateLayer: different sizes of vectors."); - } - - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); - NEConcatenateLayer concat; - concat.configure(input, &output, concatenation_axis_); - output.allocator()->allocate(); - - concat.run(); - } - - std::string get_type_name() const override { - return "ConcatenateLayer"; - } + std::string get_type_name() const override { + return "ConcatenateLayer"; + } }; #endif \ No newline at end of file diff --git a/src/layer/ConvLayer.cpp b/src/layer/ConvLayer.cpp index d53578d..5771eb6 100644 --- a/src/layer/ConvLayer.cpp +++ b/src/layer/ConvLayer.cpp @@ -10,64 +10,42 @@ class ConvolutionLayer : 
public Layer { private: - - TensorShape input_shape_; - TensorShape weights_shape_; - TensorShape biases_shape_; - TensorShape output_shape_; - Tensor* biase_t; - Tensor* weight_t; - PadStrideInfo psi; - - bool configured_ = false; + NEConvolutionLayer conv; + bool configured_ = false; public: - ConvolutionLayer(int id) { setID(id); } - - void configure( - const TensorShape& input_s, - const TensorShape& weights_s, - Tensor& weights_t, - const TensorShape& biases_s, - Tensor& biases_t, - TensorShape& output_s_ref, - const PadStrideInfo& info - ) { - - input_shape_ = input_s; - weights_shape_ = weights_s; - biases_shape_ = biases_s; - psi = info; - output_shape_ = output_s_ref; + ConvolutionLayer(int id) { setID(id); } + + void configure( + const TensorShape& input_shape, + const TensorShape& weights_shape, + const TensorShape& biases_shape, + TensorShape& output_shape, + const PadStrideInfo& info, + Tensor& input, + Tensor& weights, + Tensor& biases, + Tensor& output + ) { + + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + weights.allocator()->init(TensorInfo(weights_shape, 1, DataType::F32)); + biases.allocator()->init(TensorInfo(biases_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); - NECopy copyb, copyw; - copyb.configure(biase_t, &biases_t); - copyb.run(); - copyw.configure(weight_t, &weights_t); - copyw.run(); - - weight_t->allocator()->init(TensorInfo(weights_shape_, 1, DataType::F32)); - biase_t->allocator()->init(TensorInfo(biases_shape_, 1, DataType::F32)); - - weight_t->allocator()->allocate(); - biase_t->allocator()->allocate(); - - configured_ = true; - } + input.allocator()->allocate(); + weights.allocator()->allocate(); + biases.allocator()->allocate(); + output.allocator()->allocate(); + + conv.configure(&input, &weights, &biases, &output, info); + configured_ = true; + } - void exec(Tensor& input, Tensor& output) override { + void exec() override { if (!configured_) { throw std::runtime_error("ConvolutionLayer: Layer not configured."); } - - input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - NEConvolutionLayer conv; - conv.configure(&input, weight_t, biase_t, &output, psi); conv.run(); } diff --git a/src/layer/ElementwiseLayer.cpp b/src/layer/ElementwiseLayer.cpp index dd13ccd..619ff36 100644 --- a/src/layer/ElementwiseLayer.cpp +++ b/src/layer/ElementwiseLayer.cpp @@ -22,77 +22,53 @@ enum class ElementwiseOp { class ElementwiseLayer : public Layer { private: - ElementwiseOp op_type_; - TensorShape input1_shape, input2_shape; - TensorShape output_shape; + ElementwiseOp op_type; + NEActivationLayer act; + NEArithmeticAddition add; + NEElementwiseDivision div; + NEElementwiseSquaredDiff sqdiff; bool configured_ = false; public: - ElementwiseLayer(int id, ElementwiseOp op) : op_type_(op) { setID(id); } + ElementwiseLayer(int id, ElementwiseOp op) : op_type(op) { setID(id); } - ElementwiseLayer() : ElementwiseLayer(0, ElementwiseOp::ADD) {} + ElementwiseLayer() : ElementwiseLayer(0, ElementwiseOp::ADD) { } - void configure(const TensorShape& input_shape, TensorShape& output_shape_) { - input1_shape = input_shape; - output_shape = input_shape; - configured_ = true; - } - - void configure(const TensorShape& input1_shape_, const TensorShape& input2_shape_, TensorShape& output_shape_) { - if (input1_shape.total_size() != input2_shape.total_size()) 
{ - throw std::runtime_error( - "ElementwiseLayer: Input shapes must have same total size"); - } - - input1_shape = input1_shape_; - input2_shape = input2_shape_; - output_shape = output_shape_; - configured_ = true; - } - - void exec(Tensor& input, Tensor& output) override { - if (!configured_) { - throw std::runtime_error( - "ElementwiseLayer: Layer not configured before exec."); - } - - input.allocator()->init(TensorInfo(input1_shape, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + void configure(const TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) { + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); - input.allocator()->allocate(); - output.allocator()->allocate(); + input.allocator()->allocate(); + output.allocator()->allocate(); - switch (op_type_) { - case ElementwiseOp::ABS: { - NEActivationLayer abs; - abs.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS)); - abs.run(); - break; - } - case ElementwiseOp::SIGM: { - NEActivationLayer sigm; - sigm.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); - sigm.run(); - break; - } - case ElementwiseOp::SWISH: { - NEActivationLayer swish; - swish.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH)); - swish.run(); - break; + switch (op_type) { + case ElementwiseOp::ABS: { + act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::ABS)); + act.run(); + break; + } + case ElementwiseOp::SIGM: { + act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LOGISTIC)); + act.run(); + break; + } + case ElementwiseOp::SWISH: { + act.configure(&input, &output, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::SWISH)); + act.run(); + break; + } + default: + throw std::runtime_error("ElementwiseLayer: This operation requires two inputs"); } - default: - throw std::runtime_error( - "ElementwiseLayer: This operation requires two inputs"); - } + configured_ = true; } - void exec(Tensor& input1, Tensor& input2, Tensor& output) { - if (!configured_) { + void configure(const TensorShape& input1_shape, const TensorShape& input2_shape, TensorShape& output_shape, + Tensor& input1, Tensor& input2, Tensor& output) { + if (input1_shape.total_size() != input2_shape.total_size()) { throw std::runtime_error( - "ElementwiseLayer: Layer not configured before exec."); + "ElementwiseLayer: Input shapes must have same total size"); } - input1.allocator()->init(TensorInfo(input1_shape, 1, DataType::F32)); input2.allocator()->init(TensorInfo(input2_shape, 1, DataType::F32)); output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); @@ -101,33 +77,57 @@ class ElementwiseLayer : public Layer { input2.allocator()->allocate(); output.allocator()->allocate(); - switch (op_type_) { + switch (op_type) { + case ElementwiseOp::ADD: { + add.configure(&input1, &input2, &output, ConvertPolicy::WRAP); + add.run(); + break; + } + case ElementwiseOp::DIV: { + div.configure(&input1, &input2, &output); + div.run(); + break; + } + case ElementwiseOp::SQUARED_DIFF: { + sqdiff.configure(&input1, &input2, &output); + sqdiff.run(); + break; + } + default: + throw std::runtime_error("ElementwiseLayer: This operation requires single input"); + } + configured_ = true; + } + + void exec() override { + if 
(!configured_) { + throw std::runtime_error("ElementwiseLayer: Layer not configured before exec."); + } + switch (op_type) { + case ElementwiseOp::ABS: + case ElementwiseOp::SIGM: + case ElementwiseOp::SWISH: + act.run(); + break; case ElementwiseOp::ADD: { - NEArithmeticAddition add; - add.configure(&input1, &input2, &output, ConvertPolicy::WRAP); - add.run(); - break; + add.run(); + break; } case ElementwiseOp::DIV: { - NEElementwiseDivision div; - div.configure(&input1, &input2, &output); - div.run(); - break; + div.run(); + break; } case ElementwiseOp::SQUARED_DIFF: { - NEElementwiseSquaredDiff sqdiff; - sqdiff.configure(&input1, &input2, &output); - sqdiff.run(); - break; + sqdiff.run(); + break; } default: - throw std::runtime_error( - "ElementwiseLayer: This operation requires single input"); + throw std::runtime_error("ElementwiseLayer: This operation requires single input"); } } std::string get_type_name() const override { - switch (op_type_) { + switch (op_type) { case ElementwiseOp::ADD: return "ElementwiseAddLayer"; case ElementwiseOp::DIV: return "ElementwiseDivLayer"; case ElementwiseOp::ABS: return "ElementwiseAbsLayer"; diff --git a/src/layer/ReshapeLayer.cpp b/src/layer/ReshapeLayer.cpp index b2b27a9..b2af53a 100644 --- a/src/layer/ReshapeLayer.cpp +++ b/src/layer/ReshapeLayer.cpp @@ -10,38 +10,31 @@ class ReshapeLayer : public Layer { private: - TensorShape input_shape_config_; - TensorShape target_output_shape_config_; + NEReshapeLayer reshape; bool configured_ = false; public: ReshapeLayer(int id) { setID(id); } - void configure(const TensorShape& input_shape, const TensorShape& target_output_shape, TensorShape& output_shape_ref) { - input_shape_config_ = input_shape; - target_output_shape_config_ = target_output_shape; + void configure(const TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) { + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + input.allocator()->allocate(); + output.allocator()->allocate(); + + reshape.configure(&input, &output); configured_ = true; } - void exec(Tensor& input, Tensor& output) override { + void exec() override { if (!configured_) { throw std::runtime_error("ReshapeLayer: Layer not configured."); } - - input.allocator()->init(TensorInfo(input_shape_config_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(target_output_shape_config_, 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - NEReshapeLayer reshape; - reshape.configure(&input, &output); - reshape.run(); } - std::string get_type_name() const override { return "ReshapeLayer"; } + std::string get_type_name() const override { return "ReshapeLayer"; } }; #endif \ No newline at end of file diff --git a/src/layer/ResizeLayer.cpp b/src/layer/ResizeLayer.cpp index 75fb3f3..ed3c934 100644 --- a/src/layer/ResizeLayer.cpp +++ b/src/layer/ResizeLayer.cpp @@ -13,45 +13,36 @@ using namespace utils; class ResizeLayer : public Layer { private: - TensorShape input_shape_; - TensorShape output_shape_; - bool configured_ = false; + NEScale resize; + bool configured_ = false; public: - ResizeLayer(int id) { setID(id); } - - void configure(TensorShape& input_shape, TensorShape& output_shape) { - input_shape_ = input_shape; - output_shape_ = output_shape; - - configured_ = true; - } - - void exec(Tensor& input, Tensor& output) override { - if (!configured_) { - throw std::runtime_error( - "ResizeLayer: Layer not configured 
before exec."); - } + ResizeLayer(int id) { setID(id); } + void configure(TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) { + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); - input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); + input.allocator()->allocate(); + output.allocator()->allocate(); - input.allocator()->allocate(); - output.allocator()->allocate(); - - NEScale resize; - resize.configure(&input, &output, + resize.configure(&input, &output, ScaleKernelInfo{ InterpolationPolicy::NEAREST_NEIGHBOR, BorderMode::REPLICATE, PixelValue(), SamplingPolicy::CENTER, }); + configured_ = true; + } - resize.run(); - } + void exec() override { + if (!configured_) { + throw std::runtime_error("ResizeLayer: Layer not configured before exec."); + } + resize.run(); + } - std::string get_type_name() const override { return "ResizeLayer"; } + std::string get_type_name() const override { return "ResizeLayer"; } }; #endif \ No newline at end of file diff --git a/src/layer/SliceLayer.cpp b/src/layer/SliceLayer.cpp index a1fe876..8c9f9ab 100644 --- a/src/layer/SliceLayer.cpp +++ b/src/layer/SliceLayer.cpp @@ -8,45 +8,34 @@ #include "include/layer/layer.h" -class SliceLayer : public Layer { - private: - TensorShape input_shape_config_; - TensorShape output_shape_; - Coordinates slice_starts_; - Coordinates slice_ends_; - bool configured_ = false; - - public: - SliceLayer(int id) { setID(id); } - - void configure(const TensorShape& input_shape, Coordinates starts, Coordinates ends, TensorShape& output_shape_ref) { - input_shape_config_ = input_shape; - slice_starts_ = starts; - slice_ends_ = ends; - output_shape_ = output_shape_ref; - - configured_ = true; - } - - void exec(Tensor& input, Tensor& output) override { - if (!configured_) { - throw std::runtime_error("SliceLayer: Layer not configured."); - } +class SliceLayer : public Layer { +private: + NESlice slice; + bool configured_ = false; - input.allocator()->init(TensorInfo(input_shape_config_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); +public: + SliceLayer(int id) { setID(id); } - input.allocator()->allocate(); - output.allocator()->allocate(); + void configure(const TensorShape& input_shape, Coordinates starts, Coordinates ends, + TensorShape& output_shape, Tensor& input, Tensor& output) { - NESlice slice; - slice.configure(&input, &output, slice_starts_, slice_ends_); + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); - slice.run(); - } + input.allocator()->allocate(); + output.allocator()->allocate(); + slice.configure(&input, &output, starts, ends); + configured_ = true; + } - std::string get_type_name() const override { return "SliceLayer"; } + void exec() override { + if (!configured_) { + throw std::runtime_error("SliceLayer: Layer not configured."); + } + slice.run(); + } + std::string get_type_name() const override { return "SliceLayer"; } }; #endif \ No newline at end of file diff --git a/src/layer/SoftmaxLayer.cpp b/src/layer/SoftmaxLayer.cpp index ed809e2..2475bf7 100644 --- a/src/layer/SoftmaxLayer.cpp +++ b/src/layer/SoftmaxLayer.cpp @@ -13,34 +13,27 @@ using namespace utils; class SoftmaxLayer : public Layer { private: - TensorShape input_shape_; - TensorShape output_shape_; + 
NESoftmaxLayer m; bool configured_ = false; public: SoftmaxLayer(int id) { setID(id); } - void configure(TensorShape& input_shape, TensorShape& output_shape_ref) { - input_shape_ = input_shape; - output_shape_ = output_shape_ref; + void configure(TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) { + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + input.allocator()->allocate(); + output.allocator()->allocate(); + + m.configure(&input, &output); configured_ = true; } - void exec(Tensor& input, Tensor& output) override { + void exec() override { if (!configured_) { - throw std::runtime_error( - "SoftmaxLayer: Layer not configured before exec."); + throw std::runtime_error("SoftmaxLayer: Layer not configured before exec."); } - - input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - NESoftmaxLayer m; - m.configure(&input, &output); m.run(); } diff --git a/src/layer/SplitLayer.cpp b/src/layer/SplitLayer.cpp index 9d40906..45ea295 100644 --- a/src/layer/SplitLayer.cpp +++ b/src/layer/SplitLayer.cpp @@ -9,38 +9,29 @@ #include "include/layer/layer.h" class SplitLayer : public Layer { - private: - TensorShape input_shape_config_; - std::vector output_shapes_computed_; - unsigned int split_axis_; - - bool configured_ = false; - - public: - SplitLayer(int id) { setID(id); } +private: + NESplit split; + bool configured_ = false; - void configure(const TensorShape& input_shape, unsigned int axis, TensorShape& first_output_shape_ref) { - input_shape_config_ = input_shape; - split_axis_ = axis; +public: + SplitLayer(int id) { setID(id); } - configured_ = true; - } + void configure(const TensorShape& input_shape, unsigned int axis, Tensor& input, std::vector& outputs) { + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + input.allocator()->allocate(); - void exec(Tensor& input, std::vector& outputs) { - if (!configured_) { - throw std::runtime_error("SplitLayer: Layer not configured."); + split.configure(&input, outputs, axis); + configured_ = true; } - - input.allocator()->init(TensorInfo(input_shape_config_, 1, DataType::F32)); - input.allocator()->allocate(); - NESplit split; - split.configure(&input, outputs, split_axis_); - - split.run(); - } + void exec() override { + if (!configured_) { + throw std::runtime_error("SplitLayer: Layer not configured."); + } + split.run(); + } - std::string get_type_name() const override { return "SplitLayer"; } + std::string get_type_name() const override { return "SplitLayer"; } }; #endif \ No newline at end of file diff --git a/src/layer/TransposeLayer.cpp b/src/layer/TransposeLayer.cpp index bfbec2a..9d997bb 100644 --- a/src/layer/TransposeLayer.cpp +++ b/src/layer/TransposeLayer.cpp @@ -13,8 +13,7 @@ using namespace utils; class TransposeLayer : public Layer { private: - TensorShape input_shape_; - TensorShape output_shape_; + NETranspose t; bool configured_ = false; public: @@ -22,26 +21,21 @@ class TransposeLayer : public Layer { setID(id); } - void configure(TensorShape& input_shape, TensorShape& output_shape_ref) { - input_shape_ = input_shape; - output_shape_ = output_shape_ref; + void configure(TensorShape& input_shape, TensorShape& output_shape, Tensor& input, Tensor& output) { + input.allocator()->init(TensorInfo(input_shape, 1, DataType::F32)); + 
output.allocator()->init(TensorInfo(output_shape, 1, DataType::F32)); + input.allocator()->allocate(); + output.allocator()->allocate(); + + t.configure(&input, &output); configured_ = true; } - void exec(Tensor& input, Tensor& output) override { + void exec() override { if (!configured_) { throw std::runtime_error("TransposeLayer: Layer not configured before exec."); } - - input.allocator()->init(TensorInfo(input_shape_, 1, DataType::F32)); - output.allocator()->init(TensorInfo(output_shape_, 1, DataType::F32)); - - input.allocator()->allocate(); - output.allocator()->allocate(); - - NETranspose t; - t.configure(&input, &output); t.run(); }
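
Taken together, patches 23 and 24 move tensor initialisation and allocation out of exec() and into configure(): each layer now owns its NE* function as a member, configure() receives the caller's tensors alongside the shapes, performs init/allocate and the one-time NE*::configure, and exec() only calls run(). The sketch below is not part of the series; it only illustrates how a caller might drive the refactored SoftmaxLayer. The include paths, shape, and layer id are illustrative, and it assumes the remaining pure-virtual exec(Tensor&, Tensor&) overloads still declared in include/layer/layer.h are eventually overridden or dropped so the concrete layers can be instantiated.

    // Hypothetical driver for the refactored SoftmaxLayer (illustrative only).
    // Assumes ComputeLibrary and repository include paths are configured, and
    // that SoftmaxLayer ends up concrete (see note above).
    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "utils/Utils.h"
    #include "src/layer/SoftmaxLayer.cpp"  // header-guarded, so direct inclusion works for a demo

    #include <iostream>

    using namespace arm_compute;
    using namespace utils;

    int main() {
      Tensor input, output;
      TensorShape shape(10);  // 1D example shape, not taken from the patches

      SoftmaxLayer softmax(0);
      // configure() initialises and allocates the caller's tensors and sets up
      // the internal NESoftmaxLayer exactly once.
      softmax.configure(shape, shape, input, output);

      fill_random_tensor(input, 0.f, 1.f);  // fill after allocation, before exec()

      softmax.exec();  // exec() only calls run() on the pre-configured function
      output.print(std::cout);
    }

The design choice this illustrates: because the NE* function is configured once in configure(), repeated exec() calls no longer pay the kernel-setup and allocation cost on every invocation, which is why exec() lost its tensor parameters in these patches.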
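
The two-input layers follow the same pattern, with configure() taking the shapes and the caller's tensors together. A sketch of the ADD path of ElementwiseLayer, under the same assumptions as the previous example (include paths, instantiability of the layer classes, made-up shapes):

    // Hypothetical driver for the two-input ElementwiseLayer path (illustrative only).
    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "utils/Utils.h"
    #include "src/layer/ElementwiseLayer.cpp"  // header-guarded, included directly for a demo

    #include <iostream>

    using namespace arm_compute;
    using namespace utils;

    int main() {
      Tensor a, b, sum;
      TensorShape shape(4, 4);  // both inputs and the output share one example shape

      ElementwiseLayer add_layer(1, ElementwiseOp::ADD);
      // configure() allocates the three tensors and binds the internal
      // NEArithmeticAddition to them.
      add_layer.configure(shape, shape, shape, a, b, sum);

      fill_random_tensor(a, 0.f, 1.f);
      fill_random_tensor(b, 0.f, 1.f);

      add_layer.exec();  // runs the pre-configured addition
      sum.print(std::cout);
    }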