diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 56e2daa6..9fbbc629 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -66,7 +66,7 @@ jobs: run: | mkdir -p ${{ env.CCACHE_DIR }} git --version - git clone --revision=${{ env.LLVM_COMMIT }} https://github.com/llvm/llvm-project.git + git clone --depth 1 --filter=blob:none --revision=${{ env.LLVM_COMMIT }} https://github.com/llvm/llvm-project.git cd llvm-project mkdir build && cd build cmake -G Ninja ../llvm \ diff --git a/include/NeuraDialect/Architecture/Architecture.h b/include/NeuraDialect/Architecture/Architecture.h index 07e09b1e..7dcbad9e 100644 --- a/include/NeuraDialect/Architecture/Architecture.h +++ b/include/NeuraDialect/Architecture/Architecture.h @@ -453,6 +453,7 @@ class Architecture { Architecture(int multi_cgra_rows, int multi_cgra_columns, BaseTopology multi_cgra_base_topology = BaseTopology::MESH, int per_cgra_rows = 4, int per_cgra_columns = 4, + int max_ctrl_mem_items = 20, BaseTopology per_cgra_base_topology = BaseTopology::MESH, const TileDefaults &tile_defaults = TileDefaults(), const std::vector &tile_overrides = @@ -468,6 +469,7 @@ class Architecture { int getMultiCgraColumns() const { return multi_cgra_columns_; } int getPerCgraRows() const { return per_cgra_rows_; } int getPerCgraColumns() const { return per_cgra_columns_; } + int getMaxCtrlMemItems() const { return max_ctrl_mem_items_; } Link *getLink(int id); Link *getLink(int src_tile_x, int src_tile_y, int dst_tile_x, int dst_tile_y); @@ -520,8 +522,11 @@ class Architecture { int multi_cgra_columns_; int per_cgra_rows_; int per_cgra_columns_; + int max_ctrl_mem_items_; }; +// Function for getting the architecture object. 
+const Architecture &getArchitecture(); } // namespace neura } // namespace mlir diff --git a/include/NeuraDialect/Architecture/ArchitectureSpec.h b/include/NeuraDialect/Architecture/ArchitectureSpec.h index 2df350f7..70ee0033 100644 --- a/include/NeuraDialect/Architecture/ArchitectureSpec.h +++ b/include/NeuraDialect/Architecture/ArchitectureSpec.h @@ -68,13 +68,6 @@ struct LinkOverride { bool existence = true; }; -// Function for getting the architecture specification file path. -// This is set by the command line tool when a YAML file is provided. -std::string getArchitectureSpecFile(); - -// Function for getting tile defaults configuration. -TileDefaults getTileDefaults(); - } // namespace neura } // namespace mlir diff --git a/include/NeuraDialect/Util/ArchParser.h b/include/NeuraDialect/Util/ArchParser.h new file mode 100644 index 00000000..81963e04 --- /dev/null +++ b/include/NeuraDialect/Util/ArchParser.h @@ -0,0 +1,32 @@ +#ifndef NEURA_ARCH_PARSER_H +#define NEURA_ARCH_PARSER_H + +#include "NeuraDialect/Architecture/Architecture.h" +#include "llvm/Support/YAMLParser.h" +#include "mlir/Support/LogicalResult.h" + +namespace mlir { +namespace neura { +namespace util { +class ArchParser { +public: + ArchParser(const std::string &architecture_spec_file); + ~ArchParser() = default; + + mlir::FailureOr getArchitecture(); + +private: + std::string architecture_spec_file; + bool parseArchitectureYaml( + llvm::yaml::Document &doc, int &multi_cgra_rows, int &multi_cgra_columns, + mlir::neura::BaseTopology &multi_cgra_base_topology, int &per_cgra_rows, + int &per_cgra_columns, mlir::neura::BaseTopology &per_cgra_base_topology, + int &max_ctrl_mem_items, mlir::neura::TileDefaults &tile_defaults, + std::vector &tile_overrides, + mlir::neura::LinkDefaults &link_defaults, + std::vector &link_overrides); +}; +} // namespace util +} // namespace neura +} // namespace mlir +#endif // NEURA_ARCH_PARSER_H diff --git a/include/NeuraDialect/Util/ParserUtils.h 
b/include/NeuraDialect/Util/ParserUtils.h new file mode 100644 index 00000000..78eb7b9d --- /dev/null +++ b/include/NeuraDialect/Util/ParserUtils.h @@ -0,0 +1,38 @@ +#ifndef NEURA_PARSER_UTILS_H +#define NEURA_PARSER_UTILS_H + +#include "NeuraDialect/Architecture/ArchitectureSpec.h" +#include "NeuraDialect/Util/NeuraYamlKeys.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/YAMLParser.h" +#include "llvm/Support/raw_ostream.h" + +namespace mlir { +namespace neura { +namespace util { +bool parseYamlScalarInt(const llvm::yaml::Node *node, int &result); +bool parseYamlScalarString(const llvm::yaml::Node *node, std::string &result); +void parseYamlStringSequence(llvm::yaml::Node *node, + std::vector &result); + +bool yamlParseError(const std::string &msg, const std::string &file = ""); + +void parseTileDefaults(llvm::yaml::MappingNode *tile_defaults_map, + mlir::neura::TileDefaults &tile_defaults); +void parseTileOverrideOperations(llvm::yaml::MappingNode *override_map, + mlir::neura::TileOverride &override); +void parseSingleTileOverride(llvm::yaml::MappingNode *override_map, + mlir::neura::TileOverride &override); +bool parseTileOverrides(llvm::yaml::SequenceNode *tile_overrides_seq, + std::vector &tile_overrides); +bool parseLinkDefaults(llvm::yaml::MappingNode *link_defaults_map, + mlir::neura::LinkDefaults &link_defaults); +void parseSingleLinkOverride(llvm::yaml::MappingNode *override_map, + mlir::neura::LinkOverride &override); +bool parseLinkOverrides(llvm::yaml::SequenceNode *link_overrides_seq, + std::vector &link_overrides); +mlir::neura::BaseTopology parseTopologyString(const std::string &topology_str); +} // namespace util +} // namespace neura +} // namespace mlir +#endif // NEURA_PARSER_UTILS_H diff --git a/lib/NeuraDialect/Architecture/Architecture.cpp b/lib/NeuraDialect/Architecture/Architecture.cpp index a6c2c5a2..9bb99db9 100644 --- a/lib/NeuraDialect/Architecture/Architecture.cpp +++ b/lib/NeuraDialect/Architecture/Architecture.cpp @@ 
-1,5 +1,4 @@ #include "NeuraDialect/Architecture/Architecture.h" -#include "NeuraDialect/Architecture/ArchitectureSpec.h" #include "llvm/Support/raw_ostream.h" #include #include @@ -389,7 +388,8 @@ void Architecture::createLinkIfValid(int &link_id, Tile *src_tile, int dst_x, const LinkDefaults &link_defaults) { if (dst_x >= 0 && dst_x < getPerCgraColumns() && dst_y >= 0 && dst_y < getPerCgraRows()) { - // Checks if the destination tile actually exists (not removed by tile_overrides). + // Checks if the destination tile actually exists (not removed by + // tile_overrides). auto it = coord_to_tile_.find({dst_x, dst_y}); if (it != coord_to_tile_.end()) { createSingleLink(link_id, src_tile, it->second, link_defaults); @@ -553,6 +553,7 @@ void Architecture::applyLinkOverrides( Architecture::Architecture(int multi_cgra_rows, int multi_cgra_columns, BaseTopology multi_cgra_base_topology, int per_cgra_rows, int per_cgra_columns, + int max_ctrl_mem_items, BaseTopology per_cgra_base_topology, const TileDefaults &tile_defaults, const std::vector &tile_overrides, @@ -565,6 +566,7 @@ Architecture::Architecture(int multi_cgra_rows, int multi_cgra_columns, // this->multi_cgra_base_topology_ = multi_cgra_base_topology; this->per_cgra_rows_ = per_cgra_rows; this->per_cgra_columns_ = per_cgra_columns; + this->max_ctrl_mem_items_ = max_ctrl_mem_items; // Initializes architecture components using helper methods. 
initializeTiles(per_cgra_rows, per_cgra_columns); diff --git a/lib/NeuraDialect/CMakeLists.txt b/lib/NeuraDialect/CMakeLists.txt index 131a28cb..10846609 100644 --- a/lib/NeuraDialect/CMakeLists.txt +++ b/lib/NeuraDialect/CMakeLists.txt @@ -10,6 +10,8 @@ add_mlir_dialect_library(MLIRNeura Mapping/HeuristicMapping/HeuristicMapping.cpp Architecture/Architecture.cpp Transforms/GraphMining/GraMi.cpp + Util/ArchParser.cpp + Util/ParserUtils.cpp ADDITIONAL_HEADER_DIRS ${PROJECT_SOURCE_DIR}/include/NeuraDialect diff --git a/lib/NeuraDialect/NeuraPasses.cpp b/lib/NeuraDialect/NeuraPasses.cpp index 72a78ec7..26c1b6f2 100644 --- a/lib/NeuraDialect/NeuraPasses.cpp +++ b/lib/NeuraDialect/NeuraPasses.cpp @@ -9,6 +9,7 @@ #include "NeuraDialect/NeuraOps.h" #include "NeuraDialect/NeuraPasses.h" #include "NeuraDialect/NeuraTypes.h" +#include "mlir/Transforms/ViewOpGraph.h" std::string filename = "opgraph.dot"; std::error_code EC; @@ -20,8 +21,11 @@ void mlir::neura::registerNeuraConversionPassPipeline() { "neura-conversion", "Convert all dialects to Neura dialect", [](OpPassManager &pm) { pm.addPass(mlir::neura::createAssignAcceleratorPass()); - // Convert all the other dialects into the Neura dialect + + pm.addPass(mlir::createLowerAffineToNeuraPass()); pm.addPass(mlir::createLowerArithToNeuraPass()); + pm.addPass(mlir::createLowerMemRefToNeuraPass()); + pm.addPass(mlir::createLowerBuiltinToNeuraPass()); pm.addPass(mlir::createLowerLlvmToNeuraPass()); pm.addPass(mlir::createPrintOpGraphPass(os)); @@ -38,5 +42,8 @@ void mlir::neura::registerNeuraConversionPassPipeline() { pm.addPass(mlir::neura::createFusePatternPass()); pm.addPass(mlir::neura::createInsertDataMovPass()); pm.addPass(mlir::createPrintOpGraphPass(os)); + + pm.addPass(mlir::neura::createMapToAcceleratorPass()); + pm.addPass(mlir::neura::createGenerateCodePass()); }); } diff --git a/lib/NeuraDialect/Transforms/GenerateCodePass.cpp b/lib/NeuraDialect/Transforms/GenerateCodePass.cpp index 18766349..3c2a7699 100644 --- 
a/lib/NeuraDialect/Transforms/GenerateCodePass.cpp +++ b/lib/NeuraDialect/Transforms/GenerateCodePass.cpp @@ -267,16 +267,7 @@ struct Topology { static Topology getTopologyFromArchitecture(int per_cgra_rows, int per_cgra_columns) { Topology topo; - mlir::neura::Architecture architecture(1, - 1, - mlir::neura::BaseTopology::MESH, - per_cgra_rows, - per_cgra_columns, - mlir::neura::BaseTopology::MESH, - mlir::neura::TileDefaults{}, - std::vector{}, - mlir::neura::LinkDefaults{}, - std::vector{}); + const Architecture &architecture = mlir::neura::getArchitecture(); for (auto *tile : architecture.getAllTiles()) { topo.tile_location[tile->getId()] = {tile->getX(), tile->getY()}; @@ -411,7 +402,9 @@ struct GenerateCodePass } std::pair getArrayDimensions(func::FuncOp function) { - int columns = 4, rows = 4; // default 4x4 CGRA. + const Architecture &architecture = mlir::neura::getArchitecture(); + int columns = architecture.getPerCgraColumns(); + int rows = architecture.getPerCgraRows(); if (auto mapping_info = function->getAttrOfType(attr::kMappingInfo)) { if (auto x_tiles = dyn_cast_or_null(mapping_info.get(attr::kXTiles))) columns = x_tiles.getInt(); if (auto y_tiles = dyn_cast_or_null(mapping_info.get(attr::kYTiles))) rows = y_tiles.getInt(); diff --git a/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp b/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp index 76f6ec57..700c2b4d 100644 --- a/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp +++ b/lib/NeuraDialect/Transforms/MapToAcceleratorPass.cpp @@ -4,7 +4,6 @@ #include "Common/AcceleratorAttrs.h" #include "NeuraDialect/Architecture/Architecture.h" -#include "NeuraDialect/Architecture/ArchitectureSpec.h" #include "NeuraDialect/Mapping/HeuristicMapping/HeuristicMapping.h" #include "NeuraDialect/Mapping/MappingState.h" #include "NeuraDialect/Mapping/mapping_util.h" @@ -32,385 +31,6 @@ using namespace mlir::neura::yamlkeys; #define GEN_PASS_DEF_MAPTOACCELERATOR #include "NeuraDialect/NeuraPasses.h.inc" -// 
----------------------------------------------------------------------------- -// Utility: Extracts an integer from a YAML ScalarNode. Returns true on success. -static bool parseYamlScalarInt(const llvm::yaml::Node *node, int &result) { - auto *scalar = llvm::dyn_cast_or_null(node); - if (!scalar) - return false; - llvm::SmallString<64> value_string; - llvm::StringRef value_ref = scalar->getValue(value_string); - long long temp_value = 0; - if (value_ref.getAsInteger(10, temp_value)) - return false; - result = static_cast(temp_value); - return true; -} - -// Utility: Extracts a string from a YAML ScalarNode. Returns true on success. -static bool parseYamlScalarString(const llvm::yaml::Node *node, - std::string &result) { - auto *scalar = llvm::dyn_cast_or_null(node); - if (!scalar) - return false; - llvm::SmallString<64> value_string; - llvm::StringRef value_ref = scalar->getValue(value_string); - result = value_ref.str(); - return true; -} - -// Utility: Extracts a vector of strings from a YAML SequenceNode. -static void parseYamlStringSequence(llvm::yaml::Node *node, - std::vector &result) { - auto *seq = llvm::dyn_cast_or_null(node); - if (!seq) - return; - result.clear(); - for (auto &item : *seq) { - std::string value; - if (parseYamlScalarString(&item, value)) - result.push_back(value); - } -} - -// Utility: Print YAML parse error and return false. -static bool yamlParseError(const std::string &msg, - const std::string &file = "") { - llvm::errs() << "[MapToAcceleratorPass] YAML parse error"; - if (!file.empty()) - llvm::errs() << " in: " << file; - llvm::errs() << ": " << msg << "\n"; - return false; -} - -// ----------------------------------------------------------------------------- -// Helper function to parse tile defaults. 
-void parseTileDefaults(llvm::yaml::MappingNode *tile_defaults_map, - mlir::neura::TileDefaults &tile_defaults) { - for (auto &key_value_pair : *tile_defaults_map) { - auto *key_node = - llvm::dyn_cast_or_null(key_value_pair.getKey()); - if (!key_node) - continue; - llvm::SmallString<64> key_string; - llvm::StringRef key_ref = key_node->getValue(key_string); - - if (key_ref == kNumRegisters) { - int temp_value = 0; - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - tile_defaults.num_registers = temp_value; - } else if (key_ref == kFuTypes) { - parseYamlStringSequence(key_value_pair.getValue(), - tile_defaults.function_units); - } else { - llvm::errs() << "[MapToAcceleratorPass] Unknown tile_defaults key: " - << key_ref << "\n"; - } - } -} - -// Helper function to parse tile override operations and registers. -void parseTileOverrideOperations(llvm::yaml::MappingNode *override_map, - mlir::neura::TileOverride &override) { - for (auto &key_value_pair : *override_map) { - auto *key_node = - llvm::dyn_cast_or_null(key_value_pair.getKey()); - if (!key_node) - continue; - llvm::SmallString<64> key_string; - llvm::StringRef key_ref = key_node->getValue(key_string); - - if (key_ref == kFuTypes) { - parseYamlStringSequence(key_value_pair.getValue(), override.fu_types); - } else if (key_ref == kNumRegisters) { - int temp_value = 0; - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.num_registers = temp_value; - } else { - llvm::errs() << "[MapToAcceleratorPass] Unknown tile_override key: " - << key_ref << "\n"; - } - } -} - -// Helper function to parse a single tile override. 
-void parseSingleTileOverride(llvm::yaml::MappingNode *override_map, - mlir::neura::TileOverride &override) { - for (auto &key_value_pair : *override_map) { - auto *key_node = - llvm::dyn_cast_or_null(key_value_pair.getKey()); - if (!key_node) - continue; - llvm::SmallString<64> key_string; - llvm::StringRef key_ref = key_node->getValue(key_string); - - int temp_value = 0; - if (key_ref == kCgraX) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.cgra_x = temp_value; - } else if (key_ref == kCgraY) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.cgra_y = temp_value; - } else if (key_ref == kTileX) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.tile_x = temp_value; - } else if (key_ref == kTileY) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.tile_y = temp_value; - } else if (key_ref == kFuTypes) { - parseYamlStringSequence(key_value_pair.getValue(), override.fu_types); - } else if (key_ref == kNumRegisters) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.num_registers = temp_value; - } else if (key_ref == kExistence) { - std::string value; - if (parseYamlScalarString(key_value_pair.getValue(), value)) { - override.existence = - (value == "true" || value == "True" || value == "1"); - } - } else { - llvm::errs() << "[MapToAcceleratorPass] Unknown tile_override key: " - << key_ref << "\n"; - } - } -} - -// Helper function to parse tile overrides. -bool parseTileOverrides( - llvm::yaml::SequenceNode *tile_overrides_seq, - std::vector &tile_overrides) { - for (auto &override_node : *tile_overrides_seq) { - auto *override_map = - llvm::dyn_cast_or_null(&override_node); - if (!override_map) - continue; - mlir::neura::TileOverride override; - parseSingleTileOverride(override_map, override); - tile_overrides.push_back(override); - } - return true; -} - -// Helper function to parse link defaults. 
-bool parseLinkDefaults(llvm::yaml::MappingNode *link_defaults_map, - mlir::neura::LinkDefaults &link_defaults) { - for (auto &key_value_pair : *link_defaults_map) { - auto *key_node = - llvm::dyn_cast_or_null(key_value_pair.getKey()); - if (!key_node) - continue; - llvm::SmallString<64> key_string; - llvm::StringRef key_ref = key_node->getValue(key_string); - - int temp_value = 0; - if (key_ref == kLatency) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - link_defaults.latency = temp_value; - } else if (key_ref == kBandwidth) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - link_defaults.bandwidth = temp_value; - } else { - llvm::errs() << "[MapToAcceleratorPass] Unknown link_defaults key: " - << key_ref << "\n"; - } - } - return true; -} - -// Helper function to parse a single link override. -void parseSingleLinkOverride(llvm::yaml::MappingNode *override_map, - mlir::neura::LinkOverride &override) { - for (auto &key_value_pair : *override_map) { - auto *key_node = - llvm::dyn_cast_or_null(key_value_pair.getKey()); - if (!key_node) - continue; - llvm::SmallString<64> key_string; - llvm::StringRef key_ref = key_node->getValue(key_string); - - int temp_value = 0; - if (key_ref == kLatency) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.latency = temp_value; - } else if (key_ref == kBandwidth) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.bandwidth = temp_value; - } else if (key_ref == kSrcTileX) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.src_tile_x = temp_value; - } else if (key_ref == kSrcTileY) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.src_tile_y = temp_value; - } else if (key_ref == kDstTileX) { - if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) - override.dst_tile_x = temp_value; - } else if (key_ref == kDstTileY) { - if (parseYamlScalarInt(key_value_pair.getValue(), 
temp_value)) - override.dst_tile_y = temp_value; - } else if (key_ref == kExistence) { - std::string value; - if (parseYamlScalarString(key_value_pair.getValue(), value)) { - override.existence = - (value == "true" || value == "True" || value == "1"); - } - } else { - llvm::errs() << "[MapToAcceleratorPass] Unknown link_override key: " - << key_ref << "\n"; - } - } -} - -// Helper function to parse link overrides. -bool parseLinkOverrides( - llvm::yaml::SequenceNode *link_overrides_seq, - std::vector &link_overrides) { - for (auto &override_node : *link_overrides_seq) { - auto *override_map = - llvm::dyn_cast_or_null(&override_node); - if (!override_map) - continue; - mlir::neura::LinkOverride override; - parseSingleLinkOverride(override_map, override); - link_overrides.push_back(override); - } - return true; -} - -// Helper function to parse topology string to BaseTopology enum -mlir::neura::BaseTopology parseTopologyString(const std::string &topology_str) { - if (topology_str == kMesh) { - return mlir::neura::BaseTopology::MESH; - } else if (topology_str == kKingMesh || topology_str == kKingMeshAlt) { - return mlir::neura::BaseTopology::KING_MESH; - } else if (topology_str == kRing) { - return mlir::neura::BaseTopology::RING; - } else { - // Default to mesh if unknown topology - return mlir::neura::BaseTopology::MESH; - } -} - -// Helper function to parse architecture YAML configuration. 
-bool parseArchitectureYaml( - llvm::yaml::Document &doc, int &multi_cgra_rows, int &multi_cgra_columns, - mlir::neura::BaseTopology &multi_cgra_base_topology, int &per_cgra_rows, - int &per_cgra_columns, mlir::neura::BaseTopology &per_cgra_base_topology, - int &max_ctrl_mem_items, mlir::neura::TileDefaults &tile_defaults, - std::vector &tile_overrides, - mlir::neura::LinkDefaults &link_defaults, - std::vector &link_overrides) { - auto *root = doc.getRoot(); - if (!root) - return yamlParseError("Empty YAML document"); - auto *root_map = llvm::dyn_cast(root); - if (!root_map) - return yamlParseError("YAML root is not a mapping"); - - for (auto &key_value_pair : *root_map) { - auto *key_node = - llvm::dyn_cast_or_null(key_value_pair.getKey()); - if (!key_node) - continue; - llvm::SmallString<64> key_string; - llvm::StringRef key_ref = key_node->getValue(key_string); - - if (key_ref == kArchitecture) { - // Not used in this parser, but could be handled here. - continue; - } else if (key_ref == kMultiCgraDefaults) { - auto *multi_cgra_map = llvm::dyn_cast_or_null( - key_value_pair.getValue()); - if (!multi_cgra_map) - continue; - for (auto &multi_cgra_map_key_value_pair : *multi_cgra_map) { - auto *multi_cgra_map_key_node = - llvm::dyn_cast_or_null( - multi_cgra_map_key_value_pair.getKey()); - if (!multi_cgra_map_key_node) - continue; - llvm::SmallString<64> multi_cgra_map_key_string; - llvm::StringRef multi_cgra_map_key_ref = - multi_cgra_map_key_node->getValue(multi_cgra_map_key_string); - int temp_value = 0; - if (multi_cgra_map_key_ref == kRows) { - if (parseYamlScalarInt(multi_cgra_map_key_value_pair.getValue(), - temp_value)) - multi_cgra_rows = temp_value; - } else if (multi_cgra_map_key_ref == kColumns) { - if (parseYamlScalarInt(multi_cgra_map_key_value_pair.getValue(), - temp_value)) - multi_cgra_columns = temp_value; - } else if (multi_cgra_map_key_ref == kBaseTopology) { - std::string topo_str; - if 
(parseYamlScalarString(multi_cgra_map_key_value_pair.getValue(), - topo_str)) - multi_cgra_base_topology = parseTopologyString(topo_str); - } - } - } else if (key_ref == kPerCgraDefaults) { - auto *per_cgra_map = llvm::dyn_cast_or_null( - key_value_pair.getValue()); - if (!per_cgra_map) - continue; - for (auto &per_cgra_map_key_value_pair : *per_cgra_map) { - auto *per_cgra_map_key_node = - llvm::dyn_cast_or_null( - per_cgra_map_key_value_pair.getKey()); - if (!per_cgra_map_key_node) - continue; - llvm::SmallString<64> per_cgra_map_key_string; - llvm::StringRef per_cgra_map_key_ref = - per_cgra_map_key_node->getValue(per_cgra_map_key_string); - int temp_value = 0; - if (per_cgra_map_key_ref == kRows) { - if (parseYamlScalarInt(per_cgra_map_key_value_pair.getValue(), - temp_value)) - per_cgra_rows = temp_value; - } else if (per_cgra_map_key_ref == kColumns) { - if (parseYamlScalarInt(per_cgra_map_key_value_pair.getValue(), - temp_value)) - per_cgra_columns = temp_value; - } else if (per_cgra_map_key_ref == kBaseTopology) { - std::string topo_str; - if (parseYamlScalarString(per_cgra_map_key_value_pair.getValue(), - topo_str)) - per_cgra_base_topology = parseTopologyString(topo_str); - } else if (per_cgra_map_key_ref == kCtrlMemItems) { - if (parseYamlScalarInt(per_cgra_map_key_value_pair.getValue(), - temp_value)) - max_ctrl_mem_items = temp_value; - } - } - } else if (key_ref == kTileDefaults) { - auto *tile_defaults_map = llvm::dyn_cast_or_null( - key_value_pair.getValue()); - if (tile_defaults_map) - parseTileDefaults(tile_defaults_map, tile_defaults); - } else if (key_ref == kTileOverrides) { - auto *tile_overrides_seq = - llvm::dyn_cast_or_null( - key_value_pair.getValue()); - if (tile_overrides_seq) - parseTileOverrides(tile_overrides_seq, tile_overrides); - } else if (key_ref == kLinkDefaults) { - auto *link_defaults_map = llvm::dyn_cast_or_null( - key_value_pair.getValue()); - if (link_defaults_map) - parseLinkDefaults(link_defaults_map, link_defaults); - } 
else if (key_ref == kLinkOverrides) { - auto *link_overrides_seq = - llvm::dyn_cast_or_null( - key_value_pair.getValue()); - if (link_overrides_seq) - parseLinkOverrides(link_overrides_seq, link_overrides); - } else { - llvm::errs() << "[MapToAcceleratorPass] Unknown YAML root key: " - << key_ref << "\n"; - } - } - return true; -} - namespace { struct MapToAcceleratorPass : public PassWrapper> { @@ -571,71 +191,8 @@ struct MapToAcceleratorPass return; } - // Handle architecture specification file - constexpr int kMultiCgraDefaultRows = 1; - constexpr int kMultiCgraDefaultColumns = 1; - constexpr int kPerCgraDefaultRows = 4; - constexpr int kPerCgraDefaultColumns = 4; - constexpr int kDefaultMaxCtrlMemItems = 20; - - std::string architecture_spec_file = mlir::neura::getArchitectureSpecFile(); - int multi_cgra_rows = kMultiCgraDefaultRows; - int multi_cgra_columns = kMultiCgraDefaultColumns; - int per_cgra_rows = kPerCgraDefaultRows; - int per_cgra_columns = kPerCgraDefaultColumns; - int max_ctrl_mem_items = kDefaultMaxCtrlMemItems; - mlir::neura::TileDefaults tile_defaults; - std::vector tile_overrides; - mlir::neura::LinkDefaults link_defaults; - std::vector link_overrides; - mlir::neura::BaseTopology multi_cgra_base_topology = - mlir::neura::BaseTopology::MESH; - mlir::neura::BaseTopology per_cgra_base_topology = - mlir::neura::BaseTopology::MESH; - - if (!architecture_spec_file.empty()) { - - // Use LLVM YAML parser to validate the YAML syntax (no mapping yet) - llvm::ErrorOr> buffer_or_err = - llvm::MemoryBuffer::getFile(architecture_spec_file); - if (!buffer_or_err) { - llvm::errs() << "[MapToAcceleratorPass] Failed to open architecture " - "specification file: " - << architecture_spec_file << "\n"; - return; - } + const Architecture &architecture = mlir::neura::getArchitecture(); - llvm::SourceMgr sm; - sm.AddNewSourceBuffer(std::move(*buffer_or_err), llvm::SMLoc()); - llvm::yaml::Stream yaml_stream( - sm.getMemoryBuffer(sm.getMainFileID())->getBuffer(), sm); 
- - bool parse_failed = false; - llvm::yaml::Document &yaml_doc = *yaml_stream.begin(); - (void)yaml_doc; // ensure document is created - if (yaml_stream.failed()) { - parse_failed = true; - } - - if (parse_failed) { - llvm::errs() << "[MapToAcceleratorPass] YAML parse error in: " - << architecture_spec_file << "\n"; - return; - } - - // Parse YAML configuration - if (!parseArchitectureYaml( - yaml_doc, multi_cgra_rows, multi_cgra_columns, - multi_cgra_base_topology, per_cgra_rows, per_cgra_columns, - per_cgra_base_topology, max_ctrl_mem_items, tile_defaults, - tile_overrides, link_defaults, link_overrides)) { - return; - } - } else { - llvm::errs() << "[MapToAcceleratorPass] No architecture specification " - "file provided.\n"; - } - // assert(false); module.walk([&](func::FuncOp func) { // Skips functions not targeting the neura accelerator. auto accel_attr = @@ -693,16 +250,10 @@ struct MapToAcceleratorPass rec_mii = 1; // No recurrence cycles found, set MII to 1. } - // Always use full constructor with YAML configuration - Architecture architecture( - multi_cgra_rows, multi_cgra_columns, multi_cgra_base_topology, - per_cgra_rows, per_cgra_columns, per_cgra_base_topology, - tile_defaults, tile_overrides, link_defaults, link_overrides); int res_mii = calculateResMii(func, architecture); const int possible_min_ii = std::max(rec_mii, res_mii); - const int max_ii = - max_ctrl_mem_items; // Use YAML config (default 20 if not specified) + const int max_allowed_ii = architecture.getMaxCtrlMemItems(); std::vector topologically_sorted_ops = getTopologicallySortedOps(func); @@ -758,7 +309,7 @@ struct MapToAcceleratorPass << " (ALAP level: " << level << ")\n"; } // assert(false); - for (int ii = possible_min_ii; ii <= max_ii; ++ii) { + for (int ii = possible_min_ii; ii <= max_allowed_ii; ++ii) { llvm::errs() << "[MapToAcceleratorPass] Start mapping with target II of " << ii << "\n"; diff --git a/lib/NeuraDialect/Transforms/PromoteFuncArgToConstPass.cpp 
b/lib/NeuraDialect/Transforms/PromoteFuncArgToConstPass.cpp index e94c55d1..8db54b2e 100644 --- a/lib/NeuraDialect/Transforms/PromoteFuncArgToConstPass.cpp +++ b/lib/NeuraDialect/Transforms/PromoteFuncArgToConstPass.cpp @@ -19,6 +19,36 @@ using namespace mlir; #include "NeuraDialect/NeuraPasses.h.inc" namespace { +/** + * @brief Specializes a region by "internalizing" its input arguments as + * constants. + * + * This function performs a redirection of the dataflow. It identifies all + * input arguments of the entry block, creates a corresponding + * `neura::ConstantOp` for each, and re-links all internal operations to use + * these constants instead of the original block parameters. + * + * ### Example Transformation: + * * **Before:** + * @code + * func.func @compute(%arg0: i32) { + * %0 = arith.addi %arg0, %arg0 : i32 + * return %0 : i32 + * } + * @endcode + * * **After:** + * @code + * func.func @compute(%arg0: i32) { + * %0 = "neura.constant"() {value = "%arg0"} : () -> i32 + * %1 = arith.addi %0, %0 : i32 // Uses replaced + * return %1 : i32 + * } + * @endcode + * + * @param region The MLIR Region (typically a function body) to transform. + * @return Success if the transformation was applied (even if the region was + * empty). 
+ */ LogicalResult promoteFunctionArgsToConstants(Region ®ion) { if (region.empty()) { return success(); diff --git a/lib/NeuraDialect/Util/ArchParser.cpp b/lib/NeuraDialect/Util/ArchParser.cpp new file mode 100644 index 00000000..f79c25ca --- /dev/null +++ b/lib/NeuraDialect/Util/ArchParser.cpp @@ -0,0 +1,206 @@ +#include "NeuraDialect/Util/ArchParser.h" +#include "NeuraDialect/Util/NeuraYamlKeys.h" +#include "NeuraDialect/Util/ParserUtils.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/raw_ostream.h" + +using namespace mlir::neura::yamlkeys; + +namespace mlir { +namespace neura { +namespace util { + +ArchParser::ArchParser(const std::string &architecture_spec_file) + : architecture_spec_file(architecture_spec_file) {} + +mlir::FailureOr ArchParser::getArchitecture() { + // Default values for architecture specification file. + constexpr int kMultiCgraDefaultRows = 1; + constexpr int kMultiCgraDefaultColumns = 1; + constexpr int kPerCgraDefaultRows = 4; + constexpr int kPerCgraDefaultColumns = 4; + constexpr int kDefaultMaxCtrlMemItems = 20; + + int multi_cgra_rows = kMultiCgraDefaultRows; + int multi_cgra_columns = kMultiCgraDefaultColumns; + int per_cgra_rows = kPerCgraDefaultRows; + int per_cgra_columns = kPerCgraDefaultColumns; + int max_ctrl_mem_items = kDefaultMaxCtrlMemItems; + mlir::neura::TileDefaults tile_defaults; + std::vector tile_overrides; + mlir::neura::LinkDefaults link_defaults; + std::vector link_overrides; + mlir::neura::BaseTopology multi_cgra_base_topology = + mlir::neura::BaseTopology::MESH; + mlir::neura::BaseTopology per_cgra_base_topology = + mlir::neura::BaseTopology::MESH; + + if (!architecture_spec_file.empty()) { + + // Use LLVM YAML parser to validate the YAML syntax (no mapping yet) + llvm::ErrorOr> buffer_or_err = + llvm::MemoryBuffer::getFile(architecture_spec_file); + if (!buffer_or_err) { + llvm::errs() << "Failed to open architecture " + "specification file: " + << architecture_spec_file << "\n"; + return 
failure(); + } + + llvm::SourceMgr sm; + sm.AddNewSourceBuffer(std::move(*buffer_or_err), llvm::SMLoc()); + llvm::yaml::Stream yaml_stream( + sm.getMemoryBuffer(sm.getMainFileID())->getBuffer(), sm); + + bool parse_failed = false; + llvm::yaml::Document &yaml_doc = *yaml_stream.begin(); + (void)yaml_doc; // ensure document is created + if (yaml_stream.failed()) { + parse_failed = true; + } + + if (parse_failed) { + llvm::errs() << "YAML parse error in: " << architecture_spec_file << "\n"; + return failure(); + } + + // Parse YAML configuration + if (!parseArchitectureYaml(yaml_doc, multi_cgra_rows, multi_cgra_columns, + multi_cgra_base_topology, per_cgra_rows, + per_cgra_columns, per_cgra_base_topology, + max_ctrl_mem_items, tile_defaults, + tile_overrides, link_defaults, link_overrides)) { + return failure(); + } + } else { + llvm::errs() << "No architecture specification " + "file provided.\n"; + } + + return Architecture(multi_cgra_rows, multi_cgra_columns, + multi_cgra_base_topology, per_cgra_rows, per_cgra_columns, + max_ctrl_mem_items, per_cgra_base_topology, tile_defaults, + tile_overrides, link_defaults, link_overrides); +} + +bool ArchParser::parseArchitectureYaml( + llvm::yaml::Document &doc, int &multi_cgra_rows, int &multi_cgra_columns, + mlir::neura::BaseTopology &multi_cgra_base_topology, int &per_cgra_rows, + int &per_cgra_columns, mlir::neura::BaseTopology &per_cgra_base_topology, + int &max_ctrl_mem_items, mlir::neura::TileDefaults &tile_defaults, + std::vector &tile_overrides, + mlir::neura::LinkDefaults &link_defaults, + std::vector &link_overrides) { + auto *root = doc.getRoot(); + if (!root) + return yamlParseError("Empty YAML document"); + auto *root_map = llvm::dyn_cast(root); + if (!root_map) + return yamlParseError("YAML root is not a mapping"); + + for (auto &key_value_pair : *root_map) { + auto *key_node = + llvm::dyn_cast_or_null(key_value_pair.getKey()); + if (!key_node) + continue; + llvm::SmallString<64> key_string; + llvm::StringRef 
key_ref = key_node->getValue(key_string); + + if (key_ref == kArchitecture) { + // Not used in this parser, but could be handled here. + continue; + } else if (key_ref == kMultiCgraDefaults) { + auto *multi_cgra_map = llvm::dyn_cast_or_null( + key_value_pair.getValue()); + if (!multi_cgra_map) + continue; + for (auto &multi_cgra_map_key_value_pair : *multi_cgra_map) { + auto *multi_cgra_map_key_node = + llvm::dyn_cast_or_null( + multi_cgra_map_key_value_pair.getKey()); + if (!multi_cgra_map_key_node) + continue; + llvm::SmallString<64> multi_cgra_map_key_string; + llvm::StringRef multi_cgra_map_key_ref = + multi_cgra_map_key_node->getValue(multi_cgra_map_key_string); + int temp_value = 0; + if (multi_cgra_map_key_ref == kRows) { + if (parseYamlScalarInt(multi_cgra_map_key_value_pair.getValue(), + temp_value)) + multi_cgra_rows = temp_value; + } else if (multi_cgra_map_key_ref == kColumns) { + if (parseYamlScalarInt(multi_cgra_map_key_value_pair.getValue(), + temp_value)) + multi_cgra_columns = temp_value; + } else if (multi_cgra_map_key_ref == kBaseTopology) { + std::string topo_str; + if (parseYamlScalarString(multi_cgra_map_key_value_pair.getValue(), + topo_str)) + multi_cgra_base_topology = parseTopologyString(topo_str); + } + } + } else if (key_ref == kPerCgraDefaults) { + auto *per_cgra_map = llvm::dyn_cast_or_null( + key_value_pair.getValue()); + if (!per_cgra_map) + continue; + for (auto &per_cgra_map_key_value_pair : *per_cgra_map) { + auto *per_cgra_map_key_node = + llvm::dyn_cast_or_null( + per_cgra_map_key_value_pair.getKey()); + if (!per_cgra_map_key_node) + continue; + llvm::SmallString<64> per_cgra_map_key_string; + llvm::StringRef per_cgra_map_key_ref = + per_cgra_map_key_node->getValue(per_cgra_map_key_string); + int temp_value = 0; + if (per_cgra_map_key_ref == kRows) { + if (parseYamlScalarInt(per_cgra_map_key_value_pair.getValue(), + temp_value)) + per_cgra_rows = temp_value; + } else if (per_cgra_map_key_ref == kColumns) { + if 
(parseYamlScalarInt(per_cgra_map_key_value_pair.getValue(), + temp_value)) + per_cgra_columns = temp_value; + } else if (per_cgra_map_key_ref == kBaseTopology) { + std::string topo_str; + if (parseYamlScalarString(per_cgra_map_key_value_pair.getValue(), + topo_str)) + per_cgra_base_topology = parseTopologyString(topo_str); + } else if (per_cgra_map_key_ref == kCtrlMemItems) { + if (parseYamlScalarInt(per_cgra_map_key_value_pair.getValue(), + temp_value)) + max_ctrl_mem_items = temp_value; + } + } + } else if (key_ref == kTileDefaults) { + auto *tile_defaults_map = llvm::dyn_cast_or_null( + key_value_pair.getValue()); + if (tile_defaults_map) + parseTileDefaults(tile_defaults_map, tile_defaults); + } else if (key_ref == kTileOverrides) { + auto *tile_overrides_seq = + llvm::dyn_cast_or_null( + key_value_pair.getValue()); + if (tile_overrides_seq) + parseTileOverrides(tile_overrides_seq, tile_overrides); + } else if (key_ref == kLinkDefaults) { + auto *link_defaults_map = llvm::dyn_cast_or_null( + key_value_pair.getValue()); + if (link_defaults_map) + parseLinkDefaults(link_defaults_map, link_defaults); + } else if (key_ref == kLinkOverrides) { + auto *link_overrides_seq = + llvm::dyn_cast_or_null( + key_value_pair.getValue()); + if (link_overrides_seq) + parseLinkOverrides(link_overrides_seq, link_overrides); + } else { + llvm::errs() << "Unknown YAML root key: " << key_ref << "\n"; + } + } + return true; +} +} // namespace util +} // namespace neura +} // namespace mlir diff --git a/lib/NeuraDialect/Util/ParserUtils.cpp b/lib/NeuraDialect/Util/ParserUtils.cpp new file mode 100644 index 00000000..69288665 --- /dev/null +++ b/lib/NeuraDialect/Util/ParserUtils.cpp @@ -0,0 +1,259 @@ +#include "NeuraDialect/Util/ParserUtils.h" + +using namespace mlir::neura::yamlkeys; +using namespace mlir::neura; +namespace mlir { +namespace neura { +namespace util { +// Utility: Extracts an integer from a YAML ScalarNode. Returns true on success. 
+bool parseYamlScalarInt(const llvm::yaml::Node *node, int &result) { + auto *scalar = llvm::dyn_cast_or_null(node); + if (!scalar) + return false; + llvm::SmallString<64> value_string; + llvm::StringRef value_ref = scalar->getValue(value_string); + long long temp_value = 0; + if (value_ref.getAsInteger(10, temp_value)) + return false; + result = static_cast(temp_value); + return true; +} + +// Utility: Extracts a string from a YAML ScalarNode. Returns true on success. +bool parseYamlScalarString(const llvm::yaml::Node *node, std::string &result) { + auto *scalar = llvm::dyn_cast_or_null(node); + if (!scalar) + return false; + llvm::SmallString<64> value_string; + llvm::StringRef value_ref = scalar->getValue(value_string); + result = value_ref.str(); + return true; +} + +// Utility: Extracts a vector of strings from a YAML SequenceNode. +void parseYamlStringSequence(llvm::yaml::Node *node, + std::vector &result) { + auto *seq = llvm::dyn_cast_or_null(node); + if (!seq) + return; + result.clear(); + for (auto &item : *seq) { + std::string value; + if (parseYamlScalarString(&item, value)) + result.push_back(value); + } +} + +// Utility: Print YAML parse error and return false. +bool yamlParseError(const std::string &msg, const std::string &file) { + llvm::errs() << "YAML parse error"; + if (!file.empty()) + llvm::errs() << " in: " << file; + llvm::errs() << ": " << msg << "\n"; + return false; +} + +// ----------------------------------------------------------------------------- +// Helper function to parse tile defaults. 
+void parseTileDefaults(llvm::yaml::MappingNode *tile_defaults_map, + mlir::neura::TileDefaults &tile_defaults) { + for (auto &key_value_pair : *tile_defaults_map) { + auto *key_node = + llvm::dyn_cast_or_null(key_value_pair.getKey()); + if (!key_node) + continue; + llvm::SmallString<64> key_string; + llvm::StringRef key_ref = key_node->getValue(key_string); + + if (key_ref == kNumRegisters) { + int temp_value = 0; + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + tile_defaults.num_registers = temp_value; + } else if (key_ref == kFuTypes) { + parseYamlStringSequence(key_value_pair.getValue(), + tile_defaults.function_units); + } else { + llvm::errs() << "Unknown tile_defaults key: " << key_ref << "\n"; + } + } +} + +// Helper function to parse tile override operations and registers. +void parseTileOverrideOperations(llvm::yaml::MappingNode *override_map, + mlir::neura::TileOverride &override) { + for (auto &key_value_pair : *override_map) { + auto *key_node = + llvm::dyn_cast_or_null(key_value_pair.getKey()); + if (!key_node) + continue; + llvm::SmallString<64> key_string; + llvm::StringRef key_ref = key_node->getValue(key_string); + + if (key_ref == kFuTypes) { + parseYamlStringSequence(key_value_pair.getValue(), override.fu_types); + } else if (key_ref == kNumRegisters) { + int temp_value = 0; + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.num_registers = temp_value; + } else { + llvm::errs() << "Unknown tile_override key: " << key_ref << "\n"; + } + } +} + +// Helper function to parse a single tile override. 
+void parseSingleTileOverride(llvm::yaml::MappingNode *override_map, + mlir::neura::TileOverride &override) { + for (auto &key_value_pair : *override_map) { + auto *key_node = + llvm::dyn_cast_or_null(key_value_pair.getKey()); + if (!key_node) + continue; + llvm::SmallString<64> key_string; + llvm::StringRef key_ref = key_node->getValue(key_string); + + int temp_value = 0; + if (key_ref == kCgraX) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.cgra_x = temp_value; + } else if (key_ref == kCgraY) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.cgra_y = temp_value; + } else if (key_ref == kTileX) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.tile_x = temp_value; + } else if (key_ref == kTileY) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.tile_y = temp_value; + } else if (key_ref == kFuTypes) { + parseYamlStringSequence(key_value_pair.getValue(), override.fu_types); + } else if (key_ref == kNumRegisters) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.num_registers = temp_value; + } else if (key_ref == kExistence) { + std::string value; + if (parseYamlScalarString(key_value_pair.getValue(), value)) { + override.existence = + (value == "true" || value == "True" || value == "1"); + } + } else { + llvm::errs() << "Unknown tile_override key: " << key_ref << "\n"; + } + } +} + +// Helper function to parse tile overrides. +bool parseTileOverrides( + llvm::yaml::SequenceNode *tile_overrides_seq, + std::vector &tile_overrides) { + for (auto &override_node : *tile_overrides_seq) { + auto *override_map = + llvm::dyn_cast_or_null(&override_node); + if (!override_map) + continue; + mlir::neura::TileOverride override; + parseSingleTileOverride(override_map, override); + tile_overrides.push_back(override); + } + return true; +} + +// Helper function to parse link defaults. 
+bool parseLinkDefaults(llvm::yaml::MappingNode *link_defaults_map, + mlir::neura::LinkDefaults &link_defaults) { + for (auto &key_value_pair : *link_defaults_map) { + auto *key_node = + llvm::dyn_cast_or_null(key_value_pair.getKey()); + if (!key_node) + continue; + llvm::SmallString<64> key_string; + llvm::StringRef key_ref = key_node->getValue(key_string); + + int temp_value = 0; + if (key_ref == kLatency) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + link_defaults.latency = temp_value; + } else if (key_ref == kBandwidth) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + link_defaults.bandwidth = temp_value; + } else { + llvm::errs() << "Unknown link_defaults key: " << key_ref << "\n"; + } + } + return true; +} + +// Helper function to parse a single link override. +void parseSingleLinkOverride(llvm::yaml::MappingNode *override_map, + mlir::neura::LinkOverride &override) { + for (auto &key_value_pair : *override_map) { + auto *key_node = + llvm::dyn_cast_or_null(key_value_pair.getKey()); + if (!key_node) + continue; + llvm::SmallString<64> key_string; + llvm::StringRef key_ref = key_node->getValue(key_string); + + int temp_value = 0; + if (key_ref == kLatency) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.latency = temp_value; + } else if (key_ref == kBandwidth) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.bandwidth = temp_value; + } else if (key_ref == kSrcTileX) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.src_tile_x = temp_value; + } else if (key_ref == kSrcTileY) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.src_tile_y = temp_value; + } else if (key_ref == kDstTileX) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.dst_tile_x = temp_value; + } else if (key_ref == kDstTileY) { + if (parseYamlScalarInt(key_value_pair.getValue(), temp_value)) + override.dst_tile_y 
= temp_value; + } else if (key_ref == kExistence) { + std::string value; + if (parseYamlScalarString(key_value_pair.getValue(), value)) { + override.existence = + (value == "true" || value == "True" || value == "1"); + } + } else { + llvm::errs() << "Unknown link_override key: " << key_ref << "\n"; + } + } +} + +// Helper function to parse link overrides. +bool parseLinkOverrides( + llvm::yaml::SequenceNode *link_overrides_seq, + std::vector &link_overrides) { + for (auto &override_node : *link_overrides_seq) { + auto *override_map = + llvm::dyn_cast_or_null(&override_node); + if (!override_map) + continue; + mlir::neura::LinkOverride override; + parseSingleLinkOverride(override_map, override); + link_overrides.push_back(override); + } + return true; +} + +// Helper function to parse topology string to BaseTopology enum +BaseTopology parseTopologyString(const std::string &topology_str) { + if (topology_str == kMesh) { + return mlir::neura::BaseTopology::MESH; + } else if (topology_str == kKingMesh || topology_str == kKingMeshAlt) { + return mlir::neura::BaseTopology::KING_MESH; + } else if (topology_str == kRing) { + return mlir::neura::BaseTopology::RING; + } else { + // Default to mesh if unknown topology + return mlir::neura::BaseTopology::MESH; + } +} +} // namespace util +} // namespace neura +} // namespace mlir diff --git a/test/compiler_e2e/fir/fir_kernel.mlir b/test/compiler_e2e/fir/fir_kernel.mlir new file mode 100644 index 00000000..f283d5cd --- /dev/null +++ b/test/compiler_e2e/fir/fir_kernel.mlir @@ -0,0 +1,376 @@ +// Compiles the original C kernel to mlir, then lowers it via Neura. 
+// RUN: clang++ -S -emit-llvm -O3 -fno-vectorize -fno-unroll-loops -o %t-kernel-full.ll %S/../../benchmark/CGRA-Bench/kernels/fir/fir_int.cpp +// RUN: llvm-extract --rfunc=".*kernel.*" %t-kernel-full.ll -o %t-kernel-only.ll +// RUN: mlir-translate --import-llvm %t-kernel-only.ll -o %t-kernel.mlir + +// RUN: neura-compiler %t-kernel.mlir --neura-conversion --architecture-spec=%S/../../arch_spec/architecture.yaml -o %t-mapping.mlir +// RUN: FileCheck %s --input-file=%t-mapping.mlir -check-prefix=MAPPING +// RUN: FileCheck %s --input-file=tmp-generated-instructions.yaml --check-prefix=YAML +// RUN: FileCheck %s --input-file=tmp-generated-instructions.asm --check-prefix=ASM + +// MAPPING: func.func +// MAPPING-SAME: compiled_ii = 5 : i32 +// MAPPING-SAME: mapping_mode = "spatial-temporal" +// MAPPING-SAME: mapping_strategy = "heuristic" +// MAPPING-SAME: rec_mii = 5 : i32 +// MAPPING-SAME: res_mii = 1 : i32 +// MAPPING-SAME: x_tiles = 4 : i32 +// MAPPING-SAME: y_tiles = 4 : i32 +// +// MAPPING-NEXT: %0 = "neura.grant_once"() <{constant_value = 0 : i64}> {dfg_id = 0 : i32, mapping_locs = [{id = 11 : i32, index_per_ii = 0 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 0 : i32, x = 3 : i32, y = 2 : i32}]} : () -> !neura.data +// MAPPING-NEXT: %1 = "neura.grant_once"() <{constant_value = 0 : i32}> {dfg_id = 1 : i32, mapping_locs = [{id = 6 : i32, index_per_ii = 1 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 1 : i32, x = 2 : i32, y = 1 : i32}]} : () -> !neura.data +// MAPPING-NEXT: %2 = neura.reserve {dfg_id = 2 : i32} : !neura.data +// MAPPING-NEXT: %3 = "neura.data_mov"(%1) {dfg_id = 6 : i32, mapping_locs = [{id = 192 : i32, index_per_ii = 1 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 0 : i32, resource = "register", time_step = 1 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %4 = neura.phi_start %3, %2 {dfg_id = 8 : i32, mapping_locs = [{id = 6 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : 
i32, resource = "tile", time_step = 2 : i32, x = 2 : i32, y = 1 : i32}]} : !neura.data, !neura.data -> !neura.data +// MAPPING-NEXT: %5 = neura.reserve {dfg_id = 3 : i32} : !neura.data +// MAPPING-NEXT: %6 = "neura.data_mov"(%0) {dfg_id = 5 : i32, mapping_locs = [{id = 35 : i32, index_per_ii = 0 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 0 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %7 = neura.phi_start %6, %5 {dfg_id = 7 : i32, mapping_locs = [{id = 10 : i32, index_per_ii = 1 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 1 : i32, x = 2 : i32, y = 2 : i32}]} : !neura.data, !neura.data -> !neura.data +// MAPPING-NEXT: %8 = "neura.data_mov"(%7) {dfg_id = 11 : i32, mapping_locs = [{id = 34 : i32, index_per_ii = 1 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 1 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %9 = neura.load_indexed [%8 : !neura.data] {dfg_id = 15 : i32, mapping_locs = [{id = 14 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 2 : i32, x = 2 : i32, y = 3 : i32}]} : !neura.data +// MAPPING-NEXT: %10 = "neura.data_mov"(%7) {dfg_id = 10 : i32, mapping_locs = [{id = 31 : i32, index_per_ii = 1 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 1 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %11 = neura.load_indexed [%10 : !neura.data] {dfg_id = 14 : i32, mapping_locs = [{id = 9 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 2 : i32, x = 1 : i32, y = 2 : i32}]} : !neura.data +// MAPPING-NEXT: %12 = "neura.data_mov"(%11) {dfg_id = 18 : i32, mapping_locs = [{id = 28 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 2 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %13 = "neura.data_mov"(%9) {dfg_id = 19 : i32, mapping_locs = [{id = 45 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource 
= "link", time_step = 2 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %14 = "neura.data_mov"(%4) {dfg_id = 12 : i32, mapping_locs = [{id = 20 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 2 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %15 = "neura.mul_add"(%12, %13, %14) {dfg_id = 21 : i32, mapping_locs = [{id = 10 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 3 : i32, x = 2 : i32, y = 2 : i32}]} : (!neura.data, !neura.data, !neura.data) -> !neura.data +// MAPPING-NEXT: %16 = "neura.data_mov"(%7) {dfg_id = 9 : i32, mapping_locs = [{id = 320 : i32, index_per_ii = 1 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 0 : i32, resource = "register", time_step = 1 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %17 = "neura.add"(%16) {dfg_id = 13 : i32, mapping_locs = [{id = 10 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 2 : i32, x = 2 : i32, y = 2 : i32}], rhs_value = 1 : i64} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %18 = "neura.data_mov"(%17) {dfg_id = 17 : i32, mapping_locs = [{id = 31 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 2 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %19 = "neura.icmp"(%18) <{cmpType = "eq"}> {dfg_id = 20 : i32, mapping_locs = [{id = 9 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 3 : i32, x = 1 : i32, y = 2 : i32}], rhs_value = 32 : i64} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %20 = "neura.data_mov"(%19) {dfg_id = 23 : i32, mapping_locs = [{id = 288 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 0 : i32, resource = "register", time_step = 3 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %21 = "neura.not"(%20) {dfg_id = 26 : i32, mapping_locs = [{id = 9 : i32, index_per_ii = 4 : i32, 
invalid_iterations = 0 : i32, resource = "tile", time_step = 4 : i32, x = 1 : i32, y = 2 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %22 = "neura.data_mov"(%17) {dfg_id = 16 : i32, mapping_locs = [{id = 320 : i32, index_per_ii = 2 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 0 : i32, resource = "register", time_step = 2 : i32}, {id = 31 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 3 : i32}, {id = 288 : i32, index_per_ii = 4 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 0 : i32, resource = "register", time_step = 4 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %23 = "neura.data_mov"(%21) {dfg_id = 29 : i32, mapping_locs = [{id = 289 : i32, index_per_ii = 4 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 1 : i32, resource = "register", time_step = 4 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %24 = neura.grant_predicate %22, %23 {dfg_id = 32 : i32, mapping_locs = [{id = 9 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "tile", time_step = 5 : i32, x = 1 : i32, y = 2 : i32}]} : !neura.data, !neura.data -> !neura.data +// MAPPING-NEXT: neura.ctrl_mov %24 -> %5 {dfg_id = 35 : i32, mapping_locs = [{id = 28 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "link", time_step = 5 : i32}]} : !neura.data !neura.data +// MAPPING-NEXT: %25 = "neura.data_mov"(%15) {dfg_id = 25 : i32, mapping_locs = [{id = 321 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 1 : i32, resource = "register", time_step = 3 : i32}, {id = 321 : i32, index_per_ii = 4 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 1 : i32, resource = "register", time_step = 4 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %26 = "neura.data_mov"(%21) {dfg_id = 28 : i32, mapping_locs = [{id = 28 : i32, index_per_ii = 4 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 4 : 
i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %27 = neura.grant_predicate %25, %26 {dfg_id = 31 : i32, mapping_locs = [{id = 10 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "tile", time_step = 5 : i32, x = 2 : i32, y = 2 : i32}]} : !neura.data, !neura.data -> !neura.data +// MAPPING-NEXT: neura.ctrl_mov %27 -> %2 {dfg_id = 34 : i32, mapping_locs = [{id = 33 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "link", time_step = 5 : i32}, {id = 193 : i32, index_per_ii = 1 : i32, invalid_iterations = 1 : i32, per_tile_register_id = 1 : i32, resource = "register", time_step = 6 : i32}]} : !neura.data !neura.data +// MAPPING-NEXT: %28 = "neura.data_mov"(%15) {dfg_id = 24 : i32, mapping_locs = [{id = 320 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, per_tile_register_id = 0 : i32, resource = "register", time_step = 3 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %29 = "neura.data_mov"(%19) {dfg_id = 22 : i32, mapping_locs = [{id = 28 : i32, index_per_ii = 3 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 3 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: %30 = neura.grant_predicate %28, %29 {dfg_id = 27 : i32, mapping_locs = [{id = 10 : i32, index_per_ii = 4 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 4 : i32, x = 2 : i32, y = 2 : i32}]} : !neura.data, !neura.data -> !neura.data +// MAPPING-NEXT: %31 = "neura.data_mov"(%30) {dfg_id = 30 : i32, mapping_locs = [{id = 33 : i32, index_per_ii = 4 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 4 : i32}]} : (!neura.data) -> !neura.data +// MAPPING-NEXT: neura.return_value %31 : !neura.data {dfg_id = 33 : i32, mapping_locs = [{id = 6 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "tile", time_step = 5 : i32, x = 2 : i32, y = 1 : i32}]} +// MAPPING-NEXT: neura.yield {dfg_id = 4 : i32} + + +// ASM: # Compiled II: 5 + +// ASM: PE(2,1): +// ASM-NEXT: { 
+// ASM-NEXT: RETURN_VALUE, [NORTH, RED] (t=5, inv_iters=1) +// ASM-NEXT: } (idx_per_ii=0) +// ASM-NEXT: { +// ASM-NEXT: GRANT_ONCE, [#0] -> [$0] (t=1, inv_iters=0) +// ASM-NEXT: CTRL_MOV, [NORTH, RED] -> [$1] (t=6, inv_iters=1) +// ASM-NEXT: } (idx_per_ii=1) +// ASM-NEXT: { +// ASM-NEXT: PHI_START, [$0], [$1] -> [NORTH, RED] (t=2, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=2) + +// ASM: PE(1,2): +// ASM-NEXT: { +// ASM-NEXT: GRANT_PREDICATE, [$0], [$1] -> [EAST, RED] (t=5, inv_iters=1) +// ASM-NEXT: } (idx_per_ii=0) +// ASM-NEXT: { +// ASM-NEXT: LOAD_INDEXED, [EAST, RED] -> [EAST, RED] (t=2, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=2) +// ASM-NEXT: { +// ASM-NEXT: ICMP_EQ, [EAST, RED], [#32] -> [$0], [EAST, RED] (t=3, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=3) +// ASM-NEXT: { +// ASM-NEXT: NOT, [$0] -> [$1], [EAST, RED] (t=4, inv_iters=0) +// ASM-NEXT: DATA_MOV, [EAST, RED] -> [$0] (t=4, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=4) + +// ASM: PE(2,2): +// ASM-NEXT: { +// ASM-NEXT: GRANT_PREDICATE, [$1], [WEST, RED] -> [SOUTH, RED] (t=5, inv_iters=1) +// ASM-NEXT: } (idx_per_ii=0) +// ASM-NEXT: { +// ASM-NEXT: PHI_START, [EAST, RED], [WEST, RED] -> [NORTH, RED], [WEST, RED], [$0] (t=1, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=1) +// ASM-NEXT: { +// ASM-NEXT: ADD, [$0], [#1] -> [WEST, RED], [$0] (t=2, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=2) +// ASM-NEXT: { +// ASM-NEXT: MUL_ADD, [WEST, RED], [NORTH, RED], [SOUTH, RED] -> [$1], [$0] (t=3, inv_iters=0) +// ASM-NEXT: DATA_MOV, [$0] -> [WEST, RED] (t=3, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=3) +// ASM-NEXT: { +// ASM-NEXT: GRANT_PREDICATE, [$0], [WEST, RED] -> [SOUTH, RED] (t=4, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=4) + +// ASM: PE(3,2): +// ASM-NEXT: { +// ASM-NEXT: GRANT_ONCE, [#0] -> [WEST, RED] (t=0, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=0) + +// ASM: PE(2,3): +// ASM-NEXT: { +// ASM-NEXT: LOAD_INDEXED, [SOUTH, RED] -> [SOUTH, RED] (t=2, inv_iters=0) +// ASM-NEXT: } (idx_per_ii=2) + + + +// YAML: 
array_config: +// YAML-NEXT: columns: 4 +// YAML-NEXT: rows: 4 +// YAML-NEXT: compiled_ii: 5 +// YAML-NEXT: cores: +// YAML-NEXT: - column: 2 +// YAML-NEXT: row: 1 +// YAML-NEXT: core_id: "6" +// YAML-NEXT: entries: +// YAML-NEXT: - entry_id: "entry0" +// YAML-NEXT: instructions: +// YAML-NEXT: - index_per_ii: 0 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "RETURN_VALUE" +// YAML-NEXT: id: 33 +// YAML-NEXT: time_step: 5 +// YAML-NEXT: invalid_iterations: 1 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "NORTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 1 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "GRANT_ONCE" +// YAML-NEXT: id: 1 +// YAML-NEXT: time_step: 1 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "#0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - opcode: "CTRL_MOV" +// YAML-NEXT: id: 34 +// YAML-NEXT: time_step: 6 +// YAML-NEXT: invalid_iterations: 1 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "NORTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "$1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 2 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "PHI_START" +// YAML-NEXT: id: 8 +// YAML-NEXT: time_step: 2 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "$1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "NORTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - column: 1 +// YAML-NEXT: row: 2 +// YAML-NEXT: core_id: "9" +// YAML-NEXT: entries: +// YAML-NEXT: - entry_id: "entry0" +// YAML-NEXT: instructions: +// YAML-NEXT: - index_per_ii: 0 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "GRANT_PREDICATE" +// YAML-NEXT: id: 32 +// YAML-NEXT: time_step: 5 +// YAML-NEXT: invalid_iterations: 1 
+// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "$1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 2 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "LOAD_INDEXED" +// YAML-NEXT: id: 14 +// YAML-NEXT: time_step: 2 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 3 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "ICMP_EQ" +// YAML-NEXT: id: 20 +// YAML-NEXT: time_step: 3 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "#32" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 4 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "NOT" +// YAML-NEXT: id: 26 +// YAML-NEXT: time_step: 4 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "$1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - opcode: "DATA_MOV" +// YAML-NEXT: id: 16 +// YAML-NEXT: time_step: 4 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - column: 2 +// YAML-NEXT: row: 2 +// YAML-NEXT: core_id: "10" +// YAML-NEXT: entries: +// YAML-NEXT: - entry_id: "entry0" +// YAML-NEXT: instructions: +// YAML-NEXT: - index_per_ii: 
0 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "GRANT_PREDICATE" +// YAML-NEXT: id: 31 +// YAML-NEXT: time_step: 5 +// YAML-NEXT: invalid_iterations: 1 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "SOUTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 1 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "PHI_START" +// YAML-NEXT: id: 7 +// YAML-NEXT: time_step: 1 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "EAST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "NORTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 2 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "ADD" +// YAML-NEXT: id: 13 +// YAML-NEXT: time_step: 2 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "#1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 3 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "MUL_ADD" +// YAML-NEXT: id: 21 +// YAML-NEXT: time_step: 3 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "NORTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "SOUTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "$1" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - opcode: "DATA_MOV" 
+// YAML-NEXT: id: 160000 +// YAML-NEXT: time_step: 3 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - index_per_ii: 4 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "GRANT_PREDICATE" +// YAML-NEXT: id: 27 +// YAML-NEXT: time_step: 4 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "$0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "SOUTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - column: 3 +// YAML-NEXT: row: 2 +// YAML-NEXT: core_id: "11" +// YAML-NEXT: entries: +// YAML-NEXT: - entry_id: "entry0" +// YAML-NEXT: instructions: +// YAML-NEXT: - index_per_ii: 0 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "GRANT_ONCE" +// YAML-NEXT: id: 0 +// YAML-NEXT: time_step: 0 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "#0" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "WEST" +// YAML-NEXT: color: "RED" +// YAML-NEXT: - column: 2 +// YAML-NEXT: row: 3 +// YAML-NEXT: core_id: "14" +// YAML-NEXT: entries: +// YAML-NEXT: - entry_id: "entry0" +// YAML-NEXT: instructions: +// YAML-NEXT: - index_per_ii: 2 +// YAML-NEXT: operations: +// YAML-NEXT: - opcode: "LOAD_INDEXED" +// YAML-NEXT: id: 15 +// YAML-NEXT: time_step: 2 +// YAML-NEXT: invalid_iterations: 0 +// YAML-NEXT: src_operands: +// YAML-NEXT: - operand: "SOUTH" +// YAML-NEXT: color: "RED" +// YAML-NEXT: dst_operands: +// YAML-NEXT: - operand: "SOUTH" +// YAML-NEXT: color: "RED" diff --git a/test/compiler_e2e/visualize/test.mlir b/test/compiler_e2e/visualize/test.mlir new file mode 100644 index 00000000..a07cc79f --- /dev/null +++ b/test/compiler_e2e/visualize/test.mlir @@ -0,0 +1,165 @@ +// Test 
neura-compiler e2e pipeline +// RUN: neura-compiler --neura-conversion %s --architecture-spec=%S/../../arch_spec/architecture.yaml -o %t-mapping.mlir +// RUN: FileCheck %s --input-file=%t-mapping.mlir -check-prefix=MAPPING +// RUN: FileCheck %s --input-file=tmp-generated-instructions.yaml -check-prefix=YAML +// RUN: FileCheck %s --input-file=tmp-generated-instructions.asm -check-prefix=ASM +// RUN: FileCheck %s --input-file=tmp-generated-dfg.yaml -check-prefix=DFG +// RUN: FileCheck %s --input-file=%S/opgraph.dot -check-prefix=CHECK-GRAPH + +func.func @test_print_op_graph(%a: f32, %b: f32) -> f32 { + %c = arith.constant 2.0 : f32 + %d = arith.addf %a, %b : f32 + %e = arith.mulf %d, %c : f32 + return %e : f32 +} + +// MAPPING: module +// MAPPING: func.func @test_print_op_graph(%arg0: f32, %arg1: f32) -> f32 attributes {accelerator = "neura", dataflow_mode = "predicate", mapping_info = {compiled_ii = 1 : i32, mapping_mode = "spatial-temporal", mapping_strategy = "heuristic", rec_mii = 1 : i32, res_mii = 1 : i32, x_tiles = 4 : i32, y_tiles = 4 : i32}} +// MAPPING: %0 = "neura.constant"() <{value = "%arg0"}> {dfg_id = 0 : i32, mapping_locs = [{id = 0 : i32, index_per_ii = 0 : i32, invalid_iterations = 0 : i32, resource = "tile", time_step = 0 : i32, x = 0 : i32, y = 0 : i32}]} : () -> !neura.data +// MAPPING: %1 = "neura.data_mov"(%0) {dfg_id = 2 : i32, mapping_locs = [{id = 0 : i32, index_per_ii = 0 : i32, invalid_iterations = 0 : i32, resource = "link", time_step = 0 : i32}]} : (!neura.data) -> !neura.data +// MAPPING: %2 = "neura.fadd"(%1) {dfg_id = 3 : i32, mapping_locs = [{id = 1 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "tile", time_step = 1 : i32, x = 1 : i32, y = 0 : i32}], rhs_value = "%arg1"} : (!neura.data) -> !neura.data +// MAPPING: %3 = "neura.data_mov"(%2) {dfg_id = 4 : i32, mapping_locs = [{id = 3 : i32, index_per_ii = 0 : i32, invalid_iterations = 1 : i32, resource = "link", time_step = 1 : i32}]} : (!neura.data) -> 
!neura.data +// MAPPING: %4 = "neura.fmul"(%3) {dfg_id = 5 : i32, mapping_locs = [{id = 2 : i32, index_per_ii = 0 : i32, invalid_iterations = 2 : i32, resource = "tile", time_step = 2 : i32, x = 2 : i32, y = 0 : i32}], rhs_value = 2.000000e+00 : f32} : (!neura.data) -> !neura.data +// MAPPING: %5 = "neura.data_mov"(%4) {dfg_id = 6 : i32, mapping_locs = [{id = 6 : i32, index_per_ii = 0 : i32, invalid_iterations = 2 : i32, resource = "link", time_step = 2 : i32}]} : (!neura.data) -> !neura.data +// MAPPING: neura.return_value %5 : !neura.data {dfg_id = 7 : i32, mapping_locs = [{id = 3 : i32, index_per_ii = 0 : i32, invalid_iterations = 3 : i32, resource = "tile", time_step = 3 : i32, x = 3 : i32, y = 0 : i32}]} +// MAPPING: neura.yield {dfg_id = 1 : i32} + +// YAML: array_config: +// YAML: columns: 4 +// YAML: rows: 4 +// YAML: compiled_ii: 1 +// YAML: cores: +// YAML: - column: 0 +// YAML: row: 0 +// YAML: core_id: "0" +// YAML: entries: +// YAML: - entry_id: "entry0" +// YAML: instructions: +// YAML: - index_per_ii: 0 +// YAML: operations: +// YAML: - opcode: "CONSTANT" +// YAML: id: 0 +// YAML: time_step: 0 +// YAML: invalid_iterations: 0 +// YAML: src_operands: +// YAML: - operand: "arg0" +// YAML: dst_operands: +// YAML: - operand: "EAST" +// YAML: - column: 1 +// YAML: row: 0 +// YAML: core_id: "1" +// YAML: entries: +// YAML: - entry_id: "entry0" +// YAML: instructions: +// YAML: - index_per_ii: 0 +// YAML: operations: +// YAML: - opcode: "FADD" +// YAML: id: 3 +// YAML: time_step: 1 +// YAML: invalid_iterations: 1 +// YAML: src_operands: +// YAML: - operand: "WEST" +// YAML: - operand: "arg1" +// YAML: dst_operands: +// YAML: - operand: "EAST" +// YAML: - column: 2 +// YAML: row: 0 +// YAML: core_id: "2" +// YAML: entries: +// YAML: - entry_id: "entry0" +// YAML: instructions: +// YAML: - index_per_ii: 0 +// YAML: operations: +// YAML: - opcode: "FMUL" +// YAML: id: 5 +// YAML: time_step: 2 +// YAML: invalid_iterations: 2 +// YAML: src_operands: +// YAML: - 
operand: "WEST" +// YAML: - operand: "#2.000000" +// YAML: dst_operands: +// YAML: - operand: "EAST" +// YAML: - column: 3 +// YAML: row: 0 +// YAML: core_id: "3" +// YAML: entries: +// YAML: - entry_id: "entry0" +// YAML: instructions: +// YAML: - index_per_ii: 0 +// YAML: operations: +// YAML: - opcode: "RETURN_VALUE" +// YAML: id: 7 +// YAML: time_step: 3 +// YAML: invalid_iterations: 3 +// YAML: src_operands: +// YAML: - operand: "WEST" + +// DFG: nodes: +// DFG: - id: 0 +// DFG: opcode: "CONSTANT" +// DFG: tile_x: 0 +// DFG: tile_y: 0 +// DFG: time_step: 0 +// DFG: - id: 3 +// DFG: opcode: "FADD" +// DFG: tile_x: 1 +// DFG: tile_y: 0 +// DFG: time_step: 1 +// DFG: - id: 5 +// DFG: opcode: "FMUL" +// DFG: tile_x: 2 +// DFG: tile_y: 0 +// DFG: time_step: 2 +// DFG: - id: 7 +// DFG: opcode: "RETURN_VALUE" +// DFG: tile_x: 3 +// DFG: tile_y: 0 +// DFG: time_step: 3 +// DFG: edges: +// DFG: - from: 5 +// DFG: to: 7 +// DFG: - from: 3 +// DFG: to: 5 +// DFG: - from: 0 +// DFG: to: 3 + +// ASM: Compiled II: 1 +// ASM: PE(0,0): +// ASM: { +// ASM: CONSTANT, [arg0] -> [EAST, RED] (t=0, inv_iters=0) +// ASM: } (idx_per_ii=0) +// ASM: PE(1,0): +// ASM: { +// ASM: FADD, [WEST, RED], [arg1] -> [EAST, RED] (t=1, inv_iters=1) +// ASM: } (idx_per_ii=0) +// ASM: PE(2,0): +// ASM: { +// ASM: FMUL, [WEST, RED], [#2.000000] -> [EAST, RED] (t=2, inv_iters=2) +// ASM: } (idx_per_ii=0) +// ASM: PE(3,0): +// ASM: { +// ASM: RETURN_VALUE, [WEST, RED] (t=3, inv_iters=3) +// ASM: } (idx_per_ii=0) + + +// CHECK-GRAPH: digraph G +// CHECK-GRAPH: label = "neura.constant : (f32)\n\nvalue: 2.000000e+00 : f32", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fadd : (f32)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fmul : (f32)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.return : ()\n", shape = ellipse, style = filled]; + +// CHECK-GRAPH: digraph G +// CHECK-GRAPH: label = "neura.constant : (!neura.data)\n\nvalue: 
\"%arg0\"", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fadd : (!neura.data)\n\nrhs_value: \"%arg1\"", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fmul : (!neura.data)\n\nrhs_value: 2.000000e+00 : f32", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.return : ()\n\nreturn_type: \"value\"", shape = ellipse, style = filled]; + +// CHECK-GRAPH: digraph G +// CHECK-GRAPH: label = "neura.constant : (!neura.data)\n\nvalue: \"%arg0\"", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.data_mov : (!neura.data)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fadd : (!neura.data)\n\nrhs_value: \"%arg1\"", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.data_mov : (!neura.data)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fmul : (!neura.data)\n\nrhs_value: 2.000000e+00 : f32", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.data_mov : (!neura.data)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.return_value : ()\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.yield : ()\n", shape = ellipse, style = filled]; diff --git a/test/visualize/test2.mlir b/test/visualize/test2.mlir index fbdb21b7..7a686e52 100644 --- a/test/visualize/test2.mlir +++ b/test/visualize/test2.mlir @@ -1,25 +1,32 @@ // Test PrintOpGraphPass in neura-compiler -// RUN: neura-compiler --neura-conversion %s +// RUN: neura-compiler --neura-conversion %s --architecture-spec=%S/../arch_spec/architecture.yaml // RUN: FileCheck %s --input-file=%S/opgraph.dot -check-prefix=CHECK-GRAPH func.func @test_print_op_graph(%a: f32, %b: f32) -> f32 { - %c = arith.constant 1.0 : f32 + %c = arith.constant 2.0 : f32 %d = arith.addf %a, %b : f32 %e = arith.mulf %d, %c : f32 return %e : f32 } // CHECK-GRAPH: digraph G -// CHECK-GRAPH: compound = true; -// CHECK-GRAPH: label = "func.func : ()\n\naccelerator: 
\"neura\"\nfunction_type: (f32, f32) -> f32\nsym_name: \"test_print_op_graph..."; -// CHECK-GRAPH: label = "neura.fadd : (f32)\n" -// CHECK-GRAPH: label = "neura.return : ()\n" +// CHECK-GRAPH: label = "neura.constant : (f32)\n\nvalue: 2.000000e+00 : f32", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fadd : (f32)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fmul : (f32)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.return : ()\n", shape = ellipse, style = filled]; + // CHECK-GRAPH: digraph G -// CHECK-GRAPH: label = "func.func : ()\n\naccelerator: \"neura\"\nfunction_type: (f32, f32) -> f32\nsym_name: \"test_print_op_graph..."; // CHECK-GRAPH: label = "neura.constant : (!neura.data)\n\nvalue: \"%arg0\"", shape = ellipse, style = filled]; // CHECK-GRAPH: label = "neura.fadd : (!neura.data)\n\nrhs_value: \"%arg1\"", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fmul : (!neura.data)\n\nrhs_value: 2.000000e+00 : f32", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.return : ()\n\nreturn_type: \"value\"", shape = ellipse, style = filled]; + // CHECK-GRAPH: digraph G -// CHECK-GRAPH: label = "func.func : ()\n\naccelerator: \"neura\"\ndataflow_mode: \"predicate\"\nfunction_type: (f32, f32) -> f32\nsym_name: \"test_print_op_graph..."; // CHECK-GRAPH: label = "neura.constant : (!neura.data)\n\nvalue: \"%arg0\"", shape = ellipse, style = filled]; -// CHECK-GRAPH: label = "neura.data_mov : (!neura.data) +// CHECK-GRAPH: label = "neura.data_mov : (!neura.data)\n", shape = ellipse, style = filled]; // CHECK-GRAPH: label = "neura.fadd : (!neura.data)\n\nrhs_value: \"%arg1\"", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.data_mov : (!neura.data)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.fmul : (!neura.data)\n\nrhs_value: 2.000000e+00 : f32", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.data_mov : 
(!neura.data)\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.return_value : ()\n", shape = ellipse, style = filled]; +// CHECK-GRAPH: label = "neura.yield : ()\n", shape = ellipse, style = filled]; diff --git a/tools/mlir-neura-opt/mlir-neura-opt.cpp b/tools/mlir-neura-opt/mlir-neura-opt.cpp index a4ac0e2e..c2d0e1bc 100644 --- a/tools/mlir-neura-opt/mlir-neura-opt.cpp +++ b/tools/mlir-neura-opt/mlir-neura-opt.cpp @@ -14,24 +14,28 @@ #include "llvm/Support/CommandLine.h" #include "Conversion/ConversionPasses.h" -#include "NeuraDialect/Architecture/ArchitectureSpec.h" +#include "NeuraDialect/Architecture/Architecture.h" #include "NeuraDialect/NeuraDialect.h" #include "NeuraDialect/NeuraPasses.h" +#include "NeuraDialect/Util/ArchParser.h" #include "TaskflowDialect/TaskflowDialect.h" #include "TaskflowDialect/TaskflowPasses.h" +using mlir::neura::Architecture; +using mlir::neura::util::ArchParser; // Global variable to store architecture spec file path static std::string architecture_spec_file; -static mlir::neura::TileDefaults tile_defaults; -// Function to get the architecture spec file path -std::string mlir::neura::getArchitectureSpecFile() { - return architecture_spec_file; -} - -// Function to get tile defaults configuration -mlir::neura::TileDefaults mlir::neura::getTileDefaults() { - return tile_defaults; +const Architecture &mlir::neura::getArchitecture() { + static Architecture instance = []() { + auto arch_parser = ArchParser(architecture_spec_file); + auto architecture_result = arch_parser.getArchitecture(); + if (failed(architecture_result)) { + llvm::report_fatal_error("[neura-compiler] Failed to get architecture."); + } + return std::move(architecture_result.value()); + }(); + return instance; } int main(int argc, char **argv) { @@ -47,6 +51,10 @@ int main(int argc, char **argv) { architecture_spec_file = argv[i + 1]; ++i; // skip value continue; + } else { + llvm::errs() << "[mlir-neura-opt] Error: --architecture-spec option " + 
"requires a value\n"; + return EXIT_FAILURE; } } else if (arg_ref.starts_with("--architecture-spec=")) { architecture_spec_file = diff --git a/tools/neura-compiler/neura-compiler.cpp b/tools/neura-compiler/neura-compiler.cpp index ec0159a1..e728b8b0 100644 --- a/tools/neura-compiler/neura-compiler.cpp +++ b/tools/neura-compiler/neura-compiler.cpp @@ -10,10 +10,58 @@ #include "mlir/Tools/mlir-opt/MlirOptMain.h" #include "Conversion/ConversionPasses.h" +#include "NeuraDialect/Architecture/Architecture.h" #include "NeuraDialect/NeuraDialect.h" #include "NeuraDialect/NeuraPasses.h" +#include "NeuraDialect/Util/ArchParser.h" +#include "mlir/Support/LogicalResult.h" + +using mlir::neura::Architecture; +using mlir::neura::util::ArchParser; + +// Global variable to store architecture spec file path +static std::string architecture_spec_file; + +const Architecture &mlir::neura::getArchitecture() { + static Architecture instance = []() { + auto arch_parser = ArchParser(architecture_spec_file); + auto architecture_result = arch_parser.getArchitecture(); + if (failed(architecture_result)) { + llvm::report_fatal_error("[neura-compiler] Failed to get architecture."); + } + return std::move(architecture_result.value()); + }(); + return instance; +} int main(int argc, char **argv) { + // Manually scan and strip --architecture-spec from argv, keep others for + // MlirOptMain. 
+ std::vector<char *> forwarded_args; + forwarded_args.reserve(argc); + forwarded_args.push_back(argv[0]); + for (int i = 1; i < argc; ++i) { + llvm::StringRef arg_ref(argv[i]); + if (arg_ref == "--architecture-spec") { + if (i + 1 < argc) { + architecture_spec_file = argv[i + 1]; + ++i; // skip value + continue; + } else { + llvm::errs() << "[neura-compiler] Error: --architecture-spec option " + "requires a value\n"; + return EXIT_FAILURE; + } + } else if (arg_ref.starts_with("--architecture-spec=")) { + architecture_spec_file = + arg_ref.substr(strlen("--architecture-spec=")).str(); + continue; + } + forwarded_args.push_back(argv[i]); + } + + int new_argc = static_cast<int>(forwarded_args.size()); + char **new_argv = forwarded_args.data(); // Registers MLIR dialects. mlir::DialectRegistry registry; registry.insert(); @@ -26,7 +74,15 @@ mlir::neura::registerNeuraConversionPassPipeline(); + // Prints architecture spec file info. + if (!architecture_spec_file.empty()) { + llvm::errs() << "[neura-compiler] Architecture specification file: " + << architecture_spec_file << "\n"; + } else { + llvm::errs() << "[neura-compiler] No architecture specification file " + "provided, using default configuration\n"; + } // Runs the MLIR optimizer. - return mlir::asMainReturnCode( - mlir::MlirOptMain(argc, argv, "Neura Dialect Compiler", registry)); -} \ No newline at end of file + return mlir::asMainReturnCode(mlir::MlirOptMain( + new_argc, new_argv, "Neura Dialect Compiler", registry)); +}