diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 31adbd5..eaf6a59 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -15,6 +15,7 @@ jobs:
           submodules: recursive
       - name: Install dependencies
         run: |
+          sudo apt-get update
           sudo apt-get install -y cmake ninja-build ccache scons
       - name: ccache
         uses: hendrikmuhs/ccache-action@v1.2
@@ -37,6 +38,7 @@ jobs:
           submodules: recursive
       - name: Install dependencies
         run: |
+          sudo apt-get update
           sudo apt-get install -y cmake ninja-build ccache scons
       - name: ccache
         uses: hendrikmuhs/ccache-action@v1.2
@@ -81,6 +83,7 @@ jobs:
           submodules: true
       - name: Install dependencies
         run: |
+          sudo apt-get update
           sudo apt-get install -y cmake ninja-build ccache gcovr lcov scons
       - uses: actions/checkout@v4
         with:
@@ -102,7 +105,7 @@ jobs:
         cmake --build build --parallel
     - name: Test
       run: |
-        build/bin/run_tests
+        build/test/run_test
      env:
        CTEST_OUTPUT_ON_FAILURE: 1
    - name: Generate lcov Coverage Data
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a8e8360..98d5ae1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,15 +1,22 @@
 cmake_minimum_required(VERSION 3.20)
+set(CMAKE_CXX_STANDARD 11)
 
-project(cpp_template)
-
-include(cmake/configure.cmake)
+set(ProjectName "itlab")
+project(${ProjectName})
 
 include_directories(include)
 
 enable_testing()
-add_subdirectory(3rdparty)
-add_subdirectory(app)
-add_subdirectory(include)
+
+add_subdirectory(3rdparty/googletest)
 add_subdirectory(src)
 add_subdirectory(test)
+
+# REPORT
+message( STATUS "")
+message( STATUS "General configuration for ${PROJECT_NAME}")
+message( STATUS "======================================")
+message( STATUS "")
+message( STATUS "  Configuration: ${CMAKE_BUILD_TYPE}")
+message( STATUS "")
\ No newline at end of file
diff --git a/include/graph/graph.h b/include/graph/graph.h
new file mode 100644
index 0000000..501de3e
--- /dev/null
+++ b/include/graph/graph.h
@@ -0,0 +1,38 @@
+#ifndef GRAPH_H
+#define GRAPH_H
+
+#include <unordered_map>
+#include <vector>
+
+#include "./layer/layer.h"
+#include "./tensor/tensor.h"
+
+class Graph {
+ private:
+  std::unordered_map<int, Layer*> layers_;
+  Tensor<double> inputTensor_;
+  Tensor<double>* outputTensor_;
+  int start_ = -1;
+  int end_ = -1;
+  bool bfs_helper(int start, int vert, bool flag,
+                  std::vector<int>* v_ord) const;
+
+ public:
+  Graph();
+
+  void addLayer(Layer& lay);
+  void addEdge(Layer& layPrev, Layer& layNext);
+  void removeEdge(Layer& layPrev, Layer& layNext);
+  void removeLayer(Layer& lay);
+  int getLayers() const;
+  int getEdges() const;
+  bool empty() const;
+  bool hasPath(Layer& layPrev, Layer& layNext) const;
+  std::vector<int> BFS(int start);
+  void setInput(Layer& lay, Tensor<double>& vec);
+  void setOutput(Layer& lay, Tensor<double>& vec);
+  void inference();
+  ~Graph();
+};
+
+#endif
diff --git a/include/layer/layer.h b/include/layer/layer.h
new file mode 100644
index 0000000..c45c6d3
--- /dev/null
+++ b/include/layer/layer.h
@@ -0,0 +1,24 @@
+#ifndef LAYER_H
+#define LAYER_H
+
+#include <list>
+
+#include "./tensor/tensor.h"
+
+class Layer {
+ protected:
+  int id_;
+
+ public:
+  Layer() = default;
+  virtual ~Layer() = default;
+  void setID(int id) { id_ = id; }
+  int getID() const { return id_; }
+  virtual void run(const Tensor<double>& input, Tensor<double>& output) = 0;
+  virtual Shape get_output_shape() = 0;
+
+  void addNeighbor(Layer* neighbor);
+  void removeNeighbor(Layer* neighbor);
+  std::list<Layer*> neighbors_;
+};
+#endif
\ No newline at end of file
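`Layer` is an abstract interface and no concrete subclass appears in this diff. For orientation, here is a minimal sketch of what an implementation could look like; the `ReluLayer` name and its fixed output shape are illustrative assumptions, not part of the PR, and only the `run`/`get_output_shape` signatures come from `layer.h` above (with `Tensor<double>` as reconstructed):

```cpp
// Hypothetical example (not in this PR): a minimal concrete Layer.
#include <cstddef>
#include <utility>

#include "./layer/layer.h"

class ReluLayer : public Layer {
 public:
  explicit ReluLayer(Shape shape) : shape_(std::move(shape)) {}

  // Elementwise max(x, 0); assumes input and output share this layer's shape.
  void run(const Tensor<double>& input, Tensor<double>& output) override {
    for (std::size_t i = 0; i < input.data.size(); ++i) {
      output.data[i] = input.data[i] > 0.0 ? input.data[i] : 0.0;
    }
  }

  Shape get_output_shape() override { return shape_; }

 private:
  Shape shape_;
};
```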
diff --git a/include/tensor/tensor.h b/include/tensor/tensor.h
new file mode 100644
index 0000000..b1df5c4
--- /dev/null
+++ b/include/tensor/tensor.h
@@ -0,0 +1,98 @@
+#ifndef TENSOR_H
+#define TENSOR_H
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <stdexcept>
+#include <utility>
+#include <vector>
+
+struct Shape {
+  std::vector<size_t> dimensions;
+  size_t total_elements;
+
+  Shape(std::vector<size_t> dims);
+
+  size_t get_rank() const;
+};
+
+enum Layout : std::uint8_t { kNchw, kNhwc, kNd };
+
+template <typename T>
+class Tensor {
+ public:
+  Shape shape;
+  Layout layout;
+  std::vector<T> data;
+
+  Tensor(const Shape &sh, Layout l = Layout::kNd);
+  Tensor(std::vector<size_t> dims, Layout l = Layout::kNd);
+
+  size_t get_linear_index(const std::vector<size_t> &indices) const;
+
+  T &at(const std::vector<size_t> &indices);
+  const T &at(const std::vector<size_t> &indices) const;
+};
+
+template <typename T>
+Tensor<T>::Tensor(const Shape &sh, Layout l)
+    : shape(sh), layout(l), data(sh.total_elements) {}
+
+template <typename T>
+Tensor<T>::Tensor(std::vector<size_t> dims, Layout l)
+    : Tensor(Shape(std::move(dims)), l) {}
+
+template <typename T>
+size_t Tensor<T>::get_linear_index(const std::vector<size_t> &indices) const {
+  if (indices.size() != shape.get_rank()) {
+    throw std::runtime_error("Incorrect number of indices provided.");
+  }
+  for (size_t i = 0; i < indices.size(); ++i) {
+    if (indices[i] >= shape.dimensions[i]) {
+      throw std::out_of_range("Index out of range for dimension");
+    }
+  }
+
+  size_t linear_index = 0;
+  size_t stride = 1;
+
+  if (shape.get_rank() == 4) {
+    if (layout == Layout::kNchw) {
+      linear_index = indices[0] * (shape.dimensions[1] * shape.dimensions[2] *
+                                   shape.dimensions[3]) +
+                     indices[1] * (shape.dimensions[2] * shape.dimensions[3]) +
+                     indices[2] * shape.dimensions[3] + indices[3];
+    } else if (layout == Layout::kNhwc) {
+      linear_index = indices[0] * (shape.dimensions[1] * shape.dimensions[2] *
+                                   shape.dimensions[3]) +
+                     indices[1] * (shape.dimensions[2] * shape.dimensions[3]) +
+                     indices[2] * shape.dimensions[3] + indices[3];
+    } else {
+      linear_index = indices[0] * (shape.dimensions[1] * shape.dimensions[2] *
+                                   shape.dimensions[3]) +
+                     indices[1] * (shape.dimensions[2] * shape.dimensions[3]) +
+                     indices[2] * shape.dimensions[3] + indices[3];
+    }
+  } else {
+    // Generic row-major (C-order) indexing: the last index varies fastest,
+    // so strides accumulate from the innermost dimension outwards.
+    for (int i = static_cast<int>(shape.get_rank()) - 1; i >= 0; --i) {
+      linear_index += indices[i] * stride;
+      stride *= shape.dimensions[i];
+    }
+  }
+
+  return linear_index;
+}
+
+template <typename T>
+T &Tensor<T>::at(const std::vector<size_t> &indices) {
+  return data[get_linear_index(indices)];
+}
+
+template <typename T>
+const T &Tensor<T>::at(const std::vector<size_t> &indices) const {
+  return data[get_linear_index(indices)];
+}
+#endif
\ No newline at end of file
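The tests further down pin the indexing contract: `shape.dimensions` is interpreted in layout order, so every layout currently reduces to plain row-major indexing (the three rank-4 branches compute the same formula). A small standalone check of that arithmetic, assuming `Tensor<double>` as in the tests:

```cpp
#include <cassert>

#include "./tensor/tensor.h"

int main() {
  // dims {2, 3, 4, 5} in layout order; the row-major strides are {60, 20, 5, 1}.
  Tensor<double> t({2, 3, 4, 5}, Layout::kNchw);

  // Row-major: ((1 * 3 + 2) * 4 + 3) * 5 + 4 = 119.
  assert(t.get_linear_index({1, 2, 3, 4}) == 119);

  t.at({1, 2, 3, 4}) = 42.0;  // writes data[119]
  assert(t.data[119] == 42.0);
  return 0;
}
```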
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index e69de29..af77d7d 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB_RECURSE HEADER_FILES "${CMAKE_SOURCE_DIR}/include/*.h")
+file(GLOB_RECURSE SOURCE_FILES "${CMAKE_SOURCE_DIR}/src/*.cpp")
+
+add_library(${ProjectName} STATIC ${SOURCE_FILES} ${HEADER_FILES})
+target_sources(${ProjectName} PRIVATE ${HEADER_FILES})
+
+target_include_directories(${ProjectName} PUBLIC ${CMAKE_SOURCE_DIR}/src)
\ No newline at end of file
diff --git a/src/graph/graph.cpp b/src/graph/graph.cpp
new file mode 100644
index 0000000..885772e
--- /dev/null
+++ b/src/graph/graph.cpp
@@ -0,0 +1,179 @@
+#include "./graph/graph.h"
+
+#include <list>
+#include <queue>
+#include <stdexcept>
+#include <unordered_map>
+#include <vector>
+
+#include "./layer/layer.h"
+#include "./tensor/tensor.h"
+
+Graph::Graph() : inputTensor_({}), outputTensor_(nullptr) {}
+
+void Graph::addLayer(Layer& lay) {
+  if (layers_.find(lay.getID()) == layers_.end()) {
+    layers_[lay.getID()] = &lay;
+  }
+}
+
+void Graph::addEdge(Layer& layPrev, Layer& layNext) {
+  if (layPrev.getID() == layNext.getID()) {
+    throw std::invalid_argument("Cannot add edge from a layer to itself.");
+  }
+  if (layers_.find(layPrev.getID()) == layers_.end()) {
+    addLayer(layPrev);
+  }
+  if (layers_.find(layNext.getID()) == layers_.end()) {
+    addLayer(layNext);
+  }
+  layPrev.addNeighbor(&layNext);
+}
+
+void Graph::removeEdge(Layer& layPrev, Layer& layNext) {
+  if (layers_.find(layPrev.getID()) != layers_.end()) {
+    layPrev.removeNeighbor(&layNext);
+  }
+}
+
+void Graph::removeLayer(Layer& lay) {
+  int layer_id = lay.getID();
+
+  if (layers_.find(layer_id) == layers_.end()) {
+    return;
+  }
+
+  for (auto& pair : layers_) {
+    pair.second->removeNeighbor(&lay);
+  }
+
+  auto it = layers_.find(layer_id);
+  if (it != layers_.end()) {
+    layers_.erase(it);
+  }
+
+  if (start_ == layer_id) {
+    start_ = -1;
+  }
+  if (end_ == layer_id) {
+    end_ = -1;
+  }
+}
+
+int Graph::getLayers() const { return static_cast<int>(layers_.size()); }
+
+int Graph::getEdges() const {
+  int count = 0;
+  for (const auto& layer : layers_) {
+    count += static_cast<int>(layer.second->neighbors_.size());
+  }
+  return count;
+}
+
+bool Graph::empty() const { return layers_.empty(); }
+
+bool Graph::bfs_helper(int start, int vert, bool flag,
+                       std::vector<int>* v_ord) const {
+  std::unordered_map<int, bool> visited;
+  std::queue<int> queue;
+
+  queue.push(start);
+  visited[start] = true;
+
+  while (!queue.empty()) {
+    int current = queue.front();
+    queue.pop();
+
+    if (flag && current == vert) {
+      return true;
+    }
+
+    if (v_ord != nullptr) {
+      v_ord->push_back(current);
+    }
+
+    if (layers_.count(current) > 0) {
+      Layer* current_layer = layers_.at(current);
+
+      for (Layer* neighbor : current_layer->neighbors_) {
+        if (visited.find(neighbor->getID()) == visited.end()) {
+          visited[neighbor->getID()] = true;
+          queue.push(neighbor->getID());
+        }
+      }
+    }
+  }
+
+  return false;
+}
+
+bool Graph::hasPath(Layer& layPrev, Layer& layNext) const {
+  if (layers_.find(layPrev.getID()) == layers_.end() ||
+      layers_.find(layNext.getID()) == layers_.end()) {
+    return false;
+  }
+  return bfs_helper(layPrev.getID(), layNext.getID(), true, nullptr);
+}
+
+std::vector<int> Graph::BFS(int start) {
+  std::vector<int> v_ord;
+  bfs_helper(start, -1, false, &v_ord);
+  return v_ord;
+}
+
+void Graph::setInput(Layer& lay, Tensor<double>& vec) {
+  if (start_ != -1) {
+    throw std::runtime_error("Input layer already set.");
+  }
+  if (layers_.find(lay.getID()) == layers_.end()) {
+    addLayer(lay);
+  }
+  inputTensor_ = vec;
+  start_ = lay.getID();
+}
+
+void Graph::setOutput(Layer& lay, Tensor<double>& vec) {
+  if (end_ != -1) {
+    throw std::runtime_error("Output layer already set.");
+  }
+
+  if (layers_.find(lay.getID()) == layers_.end()) {
+    addLayer(lay);
+  }
+
+  end_ = lay.getID();
+  outputTensor_ = &vec;
+}
+
+void Graph::inference() {
+  if (start_ == -1 || end_ == -1) {
+    throw std::runtime_error("Input or output layer not set.");
+  }
+
+  std::vector<int> traversal = BFS(start_);
+
+  if (traversal.empty() || traversal.back() != end_) {
+    throw std::runtime_error("No path from start to end layer found.");
+  }
+
+  Tensor<double> current_tensor = inputTensor_;
+
+  for (int layer_id : traversal) {
+    if (layers_.find(layer_id) == layers_.end()) {
+      throw std::runtime_error("layer_id out of range in traversal.");
+    }
+    Layer* current_layer = layers_[layer_id];
+    Tensor<double> temp_output_tensor(current_layer->get_output_shape());
+    current_layer->run(current_tensor, temp_output_tensor);
+    current_tensor = temp_output_tensor;
+
+    if (layer_id == end_) {
+      if (outputTensor_ == nullptr) {
+        throw std::runtime_error("Output tensor pointer is not set.");
+      }
+      *outputTensor_ = current_tensor;
+    }
+  }
+}
+
+Graph::~Graph() = default;
\ No newline at end of file
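A sketch of the intended call sequence for `Graph`, reusing the hypothetical `ReluLayer` from the earlier sketch; the IDs, shapes, and values here are illustrative, not part of the PR:

```cpp
#include "./graph/graph.h"

int main() {
  Shape shape({1, 3});
  ReluLayer a(shape);  // hypothetical layers from the sketch above
  ReluLayer b(shape);
  a.setID(0);
  b.setID(1);

  Graph g;
  g.addEdge(a, b);  // registers both layers and the edge a -> b

  Tensor<double> in(shape);
  in.at({0, 0}) = -1.0;
  in.at({0, 1}) = 2.0;
  in.at({0, 2}) = -3.0;

  Tensor<double> out(shape);
  g.setInput(a, in);    // start_ = 0; the input tensor is copied
  g.setOutput(b, out);  // end_ = 1; a pointer to `out` is kept
  g.inference();        // BFS from start_, running each visited layer

  // After inference: out.at({0, 1}) == 2.0, the negatives clamp to 0.0.
  return 0;
}
```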
diff --git a/src/layer/layer.cpp b/src/layer/layer.cpp
new file mode 100644
index 0000000..fb20e90
--- /dev/null
+++ b/src/layer/layer.cpp
@@ -0,0 +1,9 @@
+#include "./layer/layer.h"
+
+void Layer::addNeighbor(Layer* neighbor) {
+  if (neighbor != nullptr) {
+    neighbors_.push_back(neighbor);
+  }
+}
+
+void Layer::removeNeighbor(Layer* neighbor) { neighbors_.remove(neighbor); }
\ No newline at end of file
diff --git a/src/tensor/tensor.cpp b/src/tensor/tensor.cpp
new file mode 100644
index 0000000..2c57e0b
--- /dev/null
+++ b/src/tensor/tensor.cpp
@@ -0,0 +1,14 @@
+#include "./tensor/tensor.h"
+
+#include <cstddef>
+#include <numeric>
+#include <utility>
+#include <vector>
+
+Shape::Shape(std::vector<size_t> dims) : dimensions(std::move(dims)) {
+  total_elements = std::accumulate(dimensions.begin(), dimensions.end(),
+                                   static_cast<size_t>(1),
+                                   [](size_t a, size_t b) { return a * b; });
+}
+
+size_t Shape::get_rank() const { return dimensions.size(); }
\ No newline at end of file
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 9dada0c..049ad49 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,6 +1,11 @@
-file(GLOB_RECURSE TEST_SRC_FILES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)
-add_executable(run_tests ${TEST_SRC_FILES})
-target_link_libraries(run_tests PUBLIC
-  gtest_main
-)
-
+file(GLOB_RECURSE TEST_FILES ./*.cpp)
+
+set(TestsName "run_test")
+
+add_executable(${TestsName} ${TEST_FILES})
+
+target_link_libraries(${TestsName} PRIVATE ${ProjectName} gtest)
+
+enable_testing()
+add_test(NAME ${TestsName} COMMAND ${TestsName})
\ No newline at end of file
diff --git a/include/CMakeLists.txt b/test/graph/test_graph.cpp
similarity index 100%
rename from include/CMakeLists.txt
rename to test/graph/test_graph.cpp
diff --git a/test/main.cpp b/test/main.cpp
index 4d820af..9a17845 100644
--- a/test/main.cpp
+++ b/test/main.cpp
@@ -1,6 +1,6 @@
-#include <gtest/gtest.h>
+#include "gtest/gtest.h"
 
-int main(int argc, char **argv) {
+int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
-}
+}
\ No newline at end of file
diff --git a/test/tensor/test_tensor.cpp b/test/tensor/test_tensor.cpp
new file mode 100644
index 0000000..f21fd86
--- /dev/null
+++ b/test/tensor/test_tensor.cpp
@@ -0,0 +1,81 @@
+#include <stdexcept>
+
+#include "./tensor/tensor.h"
+#include "gtest/gtest.h"
+
+TEST(ShapeTest, get_rank_and_elem_checks) {
+  Shape s({2, 3, 4});
+
+  ASSERT_EQ(s.get_rank(), 3);
+  ASSERT_EQ(s.total_elements, 24);
+}
+
+TEST(TensorTestDouble, can_at_to_tensor) {
+  Tensor<double> t({2, 3}, Layout::kNd);
+  t.at({0, 0}) = 1.0;
+  t.at({0, 1}) = 2.0;
+  t.at({0, 2}) = 3.0;
+  t.at({1, 0}) = 4.0;
+  t.at({1, 1}) = 5.0;
+  t.at({1, 2}) = 6.0;
+
+  ASSERT_DOUBLE_EQ(t.at({0, 1}), 2.0);
+  ASSERT_DOUBLE_EQ(t.at({0, 2}), 3.0);
+  ASSERT_DOUBLE_EQ(t.at({1, 0}), 4.0);
+  ASSERT_DOUBLE_EQ(t.at({1, 1}), 5.0);
+  ASSERT_DOUBLE_EQ(t.at({1, 2}), 6.0);
+
+  const Tensor<double> &ct = t;
+
+  ASSERT_DOUBLE_EQ(ct.at({0, 1}), 2.0);
+}
+
+TEST(TensorTestDouble, can_get_linear_index2D_ND) {
+  Tensor<double> t({2, 3}, Layout::kNd);
+
+  ASSERT_EQ(t.get_linear_index({0, 0}), 0);
+  ASSERT_EQ(t.get_linear_index({0, 2}), 2);
+  ASSERT_EQ(t.get_linear_index({1, 0}), 3);
+  ASSERT_EQ(t.get_linear_index({1, 2}), 5);
+}
+
+TEST(TensorTestDouble, can_get_linear_index4D_NCHW) {
+  Tensor<double> t({2, 3, 4, 5}, Layout::kNchw);
+
+  ASSERT_EQ(t.get_linear_index({0, 0, 0, 0}), 0);
+  ASSERT_EQ(t.get_linear_index({1, 2, 3, 4}), 119);
+}
+
+TEST(TensorTestDouble, can_get_linear_index4D_NHWC) {
+  Tensor<double> t({2, 3, 4, 5}, Layout::kNhwc);
+
+  ASSERT_EQ(t.get_linear_index({0, 0, 0, 0}), 0);
+  ASSERT_EQ(t.get_linear_index({1, 2, 3, 4}), 119);
+}
+TEST(TensorTestDouble, can_get_linear_index4D_ND) {
+  Tensor<double> t4d_nd({2, 3, 4, 5}, Layout::kNd);
+
+  ASSERT_EQ(t4d_nd.get_linear_index({0, 0, 0, 0}), 0);
+  ASSERT_EQ(t4d_nd.get_linear_index({1, 2, 3, 4}), 119);
+}
+
+TEST(TensorTestDouble, cant_get_linear_index_out_of_bounds) {
+  Tensor<double> t2d({2, 3}, Layout::kNd);
+  Tensor<double> t4d_nchw({2, 3, 4, 5}, Layout::kNchw);
+  Tensor<double> t4d_nhwc({2, 3, 4, 5}, Layout::kNhwc);
+  Tensor<double> t4d_nd({2, 3, 4, 5}, Layout::kNd);
+
+  EXPECT_THROW(t2d.get_linear_index({2, 0}), std::out_of_range);
+  EXPECT_THROW(t2d.get_linear_index({0, 3}), std::out_of_range);
+  EXPECT_THROW(t4d_nchw.get_linear_index({2, 0, 0, 0}), std::out_of_range);
+  EXPECT_THROW(t4d_nhwc.get_linear_index({0, 3, 0, 0}), std::out_of_range);
+  EXPECT_THROW(t4d_nd.get_linear_index({0, 0, 4, 0}), std::out_of_range);
+}
+
+TEST(TensorTestDouble, cant_get_linear_index_with_wrong_num_of_indices) {
+  Tensor<double> t2d({2, 3}, Layout::kNd);
+  Tensor<double> t4d_nchw({2, 3, 4, 5}, Layout::kNchw);
+
+  EXPECT_THROW(t2d.get_linear_index({0}), std::runtime_error);
+  EXPECT_THROW(t4d_nchw.get_linear_index({0, 0, 0}), std::runtime_error);
+}
\ No newline at end of file