2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -66,7 +66,7 @@ jobs:
run: |
mkdir -p ${{ env.CCACHE_DIR }}
git --version
git clone --revision=${{ env.LLVM_COMMIT }} https://github.com/llvm/llvm-project.git
git clone --depth 1 --filter=blob:none --revision=${{ env.LLVM_COMMIT }} https://github.com/llvm/llvm-project.git
cd llvm-project
mkdir build && cd build
cmake -G Ninja ../llvm \
5 changes: 5 additions & 0 deletions include/NeuraDialect/Architecture/Architecture.h
@@ -453,6 +453,7 @@ class Architecture {
Architecture(int multi_cgra_rows, int multi_cgra_columns,
BaseTopology multi_cgra_base_topology = BaseTopology::MESH,
int per_cgra_rows = 4, int per_cgra_columns = 4,
int max_ctrl_mem_items = 20,
BaseTopology per_cgra_base_topology = BaseTopology::MESH,
const TileDefaults &tile_defaults = TileDefaults(),
const std::vector<TileOverride> &tile_overrides =
@@ -468,6 +469,7 @@
int getMultiCgraColumns() const { return multi_cgra_columns_; }
int getPerCgraRows() const { return per_cgra_rows_; }
int getPerCgraColumns() const { return per_cgra_columns_; }
int getMaxCtrlMemItems() const { return max_ctrl_mem_items_; }

Link *getLink(int id);
Link *getLink(int src_tile_x, int src_tile_y, int dst_tile_x, int dst_tile_y);
@@ -520,8 +522,11 @@
int multi_cgra_columns_;
int per_cgra_rows_;
int per_cgra_columns_;
int max_ctrl_mem_items_;
};

// Function for getting the architecture object.
const Architecture &getArchitecture();
} // namespace neura
} // namespace mlir

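Note for reviewers: a minimal construction sketch showing where the new max_ctrl_mem_items argument sits among the defaulted constructor parameters. The values below are illustrative assumptions, not taken from this PR.

#include "NeuraDialect/Architecture/Architecture.h"

// Sketch only: 1x1 multi-CGRA, 4x4 per-CGRA array, 20 control-memory entries;
// the trailing topology/override parameters keep their declared defaults.
void buildExampleArchitecture() {
  mlir::neura::Architecture arch(
      /*multi_cgra_rows=*/1, /*multi_cgra_columns=*/1,
      mlir::neura::BaseTopology::MESH,
      /*per_cgra_rows=*/4, /*per_cgra_columns=*/4,
      /*max_ctrl_mem_items=*/20);
  (void)arch.getMaxCtrlMemItems(); // returns 20 for this configuration
}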
7 changes: 0 additions & 7 deletions include/NeuraDialect/Architecture/ArchitectureSpec.h
Expand Up @@ -68,13 +68,6 @@ struct LinkOverride {
bool existence = true;
};

// Function for getting the architecture specification file path.
// This is set by the command line tool when a YAML file is provided.
std::string getArchitectureSpecFile();

// Function for getting tile defaults configuration.
TileDefaults getTileDefaults();

} // namespace neura
} // namespace mlir

32 changes: 32 additions & 0 deletions include/NeuraDialect/Util/ArchParser.h
@@ -0,0 +1,32 @@
#ifndef NEURA_ARCH_PARSER_H
#define NEURA_ARCH_PARSER_H

#include "NeuraDialect/Architecture/Architecture.h"
#include "llvm/Support/YAMLParser.h"
#include "mlir/Support/LogicalResult.h"

namespace mlir {
namespace neura {
namespace util {
class ArchParser {
public:
ArchParser(const std::string &architecture_spec_file);
~ArchParser() = default;

mlir::FailureOr<Architecture> getArchitecture();

private:
std::string architecture_spec_file;
bool parseArchitectureYaml(
llvm::yaml::Document &doc, int &multi_cgra_rows, int &multi_cgra_columns,
mlir::neura::BaseTopology &multi_cgra_base_topology, int &per_cgra_rows,
int &per_cgra_columns, mlir::neura::BaseTopology &per_cgra_base_topology,
int &max_ctrl_mem_items, mlir::neura::TileDefaults &tile_defaults,
std::vector<mlir::neura::TileOverride> &tile_overrides,
mlir::neura::LinkDefaults &link_defaults,
std::vector<mlir::neura::LinkOverride> &link_overrides);
};
} // namespace util
} // namespace neura
} // namespace mlir
#endif // NEURA_ARCH_PARSER_H
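A hypothetical usage sketch for the new parser; the YAML path, function wrapper, and error handling here are assumptions, not lifted from the PR.

#include "NeuraDialect/Util/ArchParser.h"
#include "llvm/Support/raw_ostream.h"

mlir::FailureOr<mlir::neura::Architecture> loadArch() {
  // Parse an architecture description from a YAML spec file (name assumed).
  mlir::neura::util::ArchParser parser("architecture.yaml");
  mlir::FailureOr<mlir::neura::Architecture> arch = parser.getArchitecture();
  if (mlir::failed(arch))
    llvm::errs() << "failed to parse architecture spec\n";
  return arch;
}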
38 changes: 38 additions & 0 deletions include/NeuraDialect/Util/ParserUtils.h
@@ -0,0 +1,38 @@
#ifndef NEURA_PARSER_UTILS_H
#define NEURA_PARSER_UTILS_H

#include "NeuraDialect/Architecture/ArchitectureSpec.h"
#include "NeuraDialect/Util/NeuraYamlKeys.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/YAMLParser.h"
#include "llvm/Support/raw_ostream.h"

namespace mlir {
namespace neura {
namespace util {
bool parseYamlScalarInt(const llvm::yaml::Node *node, int &result);
bool parseYamlScalarString(const llvm::yaml::Node *node, std::string &result);
void parseYamlStringSequence(llvm::yaml::Node *node,
std::vector<std::string> &result);

bool yamlParseError(const std::string &msg, const std::string &file = "");

void parseTileDefaults(llvm::yaml::MappingNode *tile_defaults_map,
mlir::neura::TileDefaults &tile_defaults);
void parseTileOverrideOperations(llvm::yaml::MappingNode *override_map,
mlir::neura::TileOverride &override);
void parseSingleTileOverride(llvm::yaml::MappingNode *override_map,
mlir::neura::TileOverride &override);
bool parseTileOverrides(llvm::yaml::SequenceNode *tile_overrides_seq,
std::vector<mlir::neura::TileOverride> &tile_overrides);
bool parseLinkDefaults(llvm::yaml::MappingNode *link_defaults_map,
mlir::neura::LinkDefaults &link_defaults);
void parseSingleLinkOverride(llvm::yaml::MappingNode *override_map,
mlir::neura::LinkOverride &override);
bool parseLinkOverrides(llvm::yaml::SequenceNode *link_overrides_seq,
std::vector<mlir::neura::LinkOverride> &link_overrides);
mlir::neura::BaseTopology parseTopologyString(const std::string &topology_str);
} // namespace util
} // namespace neura
} // namespace mlir
#endif // NEURA_PARSER_UTILS_H
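As a reading aid, one plausible shape for parseYamlScalarInt using the llvm::yaml scalar API; the actual implementation lives in Util/ParserUtils.cpp and may differ, so this is a sketch, not the PR's code.

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/YAMLParser.h"

// Returns true on success; false if the node is not a scalar or not an integer.
bool parseYamlScalarIntSketch(const llvm::yaml::Node *node, int &result) {
  const auto *scalar = llvm::dyn_cast_or_null<llvm::yaml::ScalarNode>(node);
  if (!scalar)
    return false;
  llvm::SmallString<16> storage;
  llvm::StringRef value = scalar->getValue(storage);
  // StringRef::getAsInteger returns true on failure, hence the negation.
  return !value.getAsInteger(/*Radix=*/10, result);
}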
6 changes: 4 additions & 2 deletions lib/NeuraDialect/Architecture/Architecture.cpp
@@ -1,5 +1,4 @@
#include "NeuraDialect/Architecture/Architecture.h"
#include "NeuraDialect/Architecture/ArchitectureSpec.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -389,7 +388,8 @@ void Architecture::createLinkIfValid(int &link_id, Tile *src_tile, int dst_x,
const LinkDefaults &link_defaults) {
if (dst_x >= 0 && dst_x < getPerCgraColumns() && dst_y >= 0 &&
dst_y < getPerCgraRows()) {
// Checks if the destination tile actually exists (not removed by tile_overrides).
// Checks if the destination tile actually exists (not removed by
// tile_overrides).
auto it = coord_to_tile_.find({dst_x, dst_y});
if (it != coord_to_tile_.end()) {
createSingleLink(link_id, src_tile, it->second, link_defaults);
@@ -553,6 +553,7 @@ void Architecture::applyLinkOverrides(
Architecture::Architecture(int multi_cgra_rows, int multi_cgra_columns,
BaseTopology multi_cgra_base_topology,
int per_cgra_rows, int per_cgra_columns,
int max_ctrl_mem_items,
BaseTopology per_cgra_base_topology,
const TileDefaults &tile_defaults,
const std::vector<TileOverride> &tile_overrides,
@@ -565,6 +566,7 @@ Architecture::Architecture(int multi_cgra_rows, int multi_cgra_columns,
// this->multi_cgra_base_topology_ = multi_cgra_base_topology;
this->per_cgra_rows_ = per_cgra_rows;
this->per_cgra_columns_ = per_cgra_columns;
this->max_ctrl_mem_items_ = max_ctrl_mem_items;

// Initializes architecture components using helper methods.
initializeTiles(per_cgra_rows, per_cgra_columns);
2 changes: 2 additions & 0 deletions lib/NeuraDialect/CMakeLists.txt
@@ -10,6 +10,8 @@ add_mlir_dialect_library(MLIRNeura
Mapping/HeuristicMapping/HeuristicMapping.cpp
Architecture/Architecture.cpp
Transforms/GraphMining/GraMi.cpp
Util/ArchParser.cpp
Util/ParserUtils.cpp

ADDITIONAL_HEADER_DIRS
${PROJECT_SOURCE_DIR}/include/NeuraDialect
9 changes: 8 additions & 1 deletion lib/NeuraDialect/NeuraPasses.cpp
@@ -9,6 +9,7 @@
#include "NeuraDialect/NeuraOps.h"
#include "NeuraDialect/NeuraPasses.h"
#include "NeuraDialect/NeuraTypes.h"
#include "mlir/Transforms/ViewOpGraph.h"

std::string filename = "opgraph.dot";
std::error_code EC;
@@ -20,8 +21,11 @@ void mlir::neura::registerNeuraConversionPassPipeline() {
"neura-conversion", "Convert all dialects to Neura dialect",
[](OpPassManager &pm) {
pm.addPass(mlir::neura::createAssignAcceleratorPass());
// Convert all the other dialects into the Neura dialect

pm.addPass(mlir::createLowerAffineToNeuraPass());
pm.addPass(mlir::createLowerArithToNeuraPass());
pm.addPass(mlir::createLowerMemRefToNeuraPass());
pm.addPass(mlir::createLowerBuiltinToNeuraPass());
pm.addPass(mlir::createLowerLlvmToNeuraPass());
pm.addPass(mlir::createPrintOpGraphPass(os));

@@ -38,5 +42,8 @@
pm.addPass(mlir::neura::createFusePatternPass());
pm.addPass(mlir::neura::createInsertDataMovPass());
pm.addPass(mlir::createPrintOpGraphPass(os));

pm.addPass(mlir::neura::createMapToAcceleratorPass());
pm.addPass(mlir::neura::createGenerateCodePass());
});
}
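For completeness, a hedged driver-side sketch of running the extended pipeline by name. The driver plumbing below is an assumption (the PR itself only changes the registered pass order), and it relies on "neura-conversion" being resolvable through the standard pipeline registry.

#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"

mlir::LogicalResult runNeuraConversion(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  // Resolves the pipeline registered as "neura-conversion" above.
  if (mlir::failed(mlir::parsePassPipeline("neura-conversion", pm)))
    return mlir::failure();
  return pm.run(module);
}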
15 changes: 4 additions & 11 deletions lib/NeuraDialect/Transforms/GenerateCodePass.cpp
@@ -267,16 +267,7 @@ struct Topology {

static Topology getTopologyFromArchitecture(int per_cgra_rows, int per_cgra_columns) {
Topology topo;
mlir::neura::Architecture architecture(1,
1,
mlir::neura::BaseTopology::MESH,
per_cgra_rows,
per_cgra_columns,
mlir::neura::BaseTopology::MESH,
mlir::neura::TileDefaults{},
std::vector<mlir::neura::TileOverride>{},
mlir::neura::LinkDefaults{},
std::vector<mlir::neura::LinkOverride>{});
const Architecture &architecture = mlir::neura::getArchitecture();

for (auto *tile : architecture.getAllTiles()) {
topo.tile_location[tile->getId()] = {tile->getX(), tile->getY()};
@@ -411,7 +402,9 @@ struct GenerateCodePass
}

std::pair<int, int> getArrayDimensions(func::FuncOp function) {
int columns = 4, rows = 4; // default 4x4 CGRA.
const Architecture &architecture = mlir::neura::getArchitecture();
int columns = architecture.getPerCgraColumns();
int rows = architecture.getPerCgraRows();
if (auto mapping_info = function->getAttrOfType<DictionaryAttr>(attr::kMappingInfo)) {
if (auto x_tiles = dyn_cast_or_null<IntegerAttr>(mapping_info.get(attr::kXTiles))) columns = x_tiles.getInt();
if (auto y_tiles = dyn_cast_or_null<IntegerAttr>(mapping_info.get(attr::kYTiles))) rows = y_tiles.getInt();
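The net effect on callers, sketched below: the hard-coded 4x4 default is replaced by queries against the shared architecture object. The accessors are the ones declared in Architecture.h above; the wrapper function is only illustrative.

#include "NeuraDialect/Architecture/Architecture.h"

void queryArrayDimensions() {
  // Shared architecture object replaces the previous hard-coded 4x4 default.
  const mlir::neura::Architecture &arch = mlir::neura::getArchitecture();
  int columns = arch.getPerCgraColumns();
  int rows = arch.getPerCgraRows();
  int ctrl_mem_items = arch.getMaxCtrlMemItems();
  (void)columns; (void)rows; (void)ctrl_mem_items;
}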