Commit 305ab41

Fix clang-tidy errors, closes #189

nsmithtt committed Sep 4, 2024
1 parent e9b4720 commit 305ab41
Showing 16 changed files with 97 additions and 101 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -1,5 +1,5 @@
 ---
-Checks: 'clang-diagnostic-*,clang-analyzer-*,llvm-*,google-global-names-in-headers,google-readability-*,cppcoreguidelines-virtual-class-destructor,cppcoreguidelines-use-default-member-init,cppcoreguidelines-owning-memory,cppcoreguidelines-interfaces-global-init,cppcoreguidelines-explicit-virtual-functions,cppcoreguidelines-avoid-const-or-ref-data-members,cppcoreguidelines-avoid-goto,cppcoreguidelines-avoid-non-const-global-variables'
+Checks: 'clang-diagnostic-*,clang-analyzer-*,llvm-*,google-global-names-in-headers,google-readability-*,cppcoreguidelines-virtual-class-destructor,cppcoreguidelines-use-default-member-init,cppcoreguidelines-owning-memory,cppcoreguidelines-interfaces-global-init,cppcoreguidelines-explicit-virtual-functions,cppcoreguidelines-avoid-const-or-ref-data-members,cppcoreguidelines-avoid-goto,cppcoreguidelines-avoid-non-const-global-variables,-clang-analyzer-unix.Malloc'
 WarningsAsErrors: ''
 HeaderFileExtensions:
   - h
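Note: a leading - in the Checks glob list removes matching checks, so this change keeps the whole llvm-*/google-*/cppcoreguidelines-* set while opting out of clang-analyzer-unix.Malloc alone. A minimal, hypothetical example (not from this repo) of the kind of code that analyzer check reports:

#include <cstdlib>

int parse(const char *src) {
  char *scratch = static_cast<char *>(std::malloc(64));
  if (src == nullptr) {
    return -1; // unix.Malloc: 'scratch' leaks on this early-return path
  }
  std::free(scratch);
  return src[0];
}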
1 change: 0 additions & 1 deletion .github/workflows/docker-build.yml
@@ -69,7 +69,6 @@ jobs:
         run: |
           source env/activate
           cmake --build ${{ steps.strings.outputs.build-output-dir }} --config ${{ matrix.build.build_type }} -- clang-tidy
-        continue-on-error: true
       - name: Run Test
         shell: bash
4 changes: 0 additions & 4 deletions include/ttmlir/Dialect/TTKernel/IR/TTKernelOps.td
@@ -70,10 +70,6 @@ def TTKernel_AcquireDstOp : TTKernel_Op<"acquire_dst"> {
   let description = [{
     Acquire dest operation
   }];
-
-  //TODO: Add MemRefAttr
-  //let arguments = (ins MemRefAttr:$view);
-  //let hasVerifier = 1;
 }

 def TTKernel_ReleaseDstOp : TTKernel_Op<"release_dst"> {
13 changes: 6 additions & 7 deletions lib/Dialect/TT/IR/TTOpsTypes.cpp
@@ -120,7 +120,7 @@ mlir::tt::SystemDescAttr::getFromPath(MLIRContext *context, std::string &path) {
   fbb.read(static_cast<char *>(buffer.get()), size);

   // Read relevant information from binary
-  auto binary_system_desc =
+  auto const *binary_system_desc =
       ::tt::target::GetSizePrefixedSystemDescRoot(buffer.get())->system_desc();
   auto const *binary_chip_desc = binary_system_desc->chip_descs();
   auto const *binary_chip_desc_indices =
@@ -131,11 +131,10 @@ mlir::tt::SystemDescAttr::getFromPath(MLIRContext *context, std::string &path) {

   // Acquire chip descs
   std::vector<tt::ChipDescAttr> chip_desc_list;
-  for (auto element : *binary_chip_desc) {
-
+  for (auto const *element : *binary_chip_desc) {
     std::vector<tt::CoreCoordAttr> worker_cores, dram_cores, eth_cores,
         eth_inactive_cores;
-    auto physical_cores = element->physical_cores();
+    auto const *physical_cores = element->physical_cores();

     // Populate all vectors with CoreCoordAttr instances
     for (auto const &core : *physical_cores->worker()) {
@@ -229,7 +228,7 @@ mlir::tt::SystemDescAttr::getFromPath(MLIRContext *context, std::string &path) {

   SmallVector<tt::TileSizeAttr> supported_tile_sizes_attr;

-  for (auto it : *(element->supported_tile_sizes())) {
+  for (auto const *it : *(element->supported_tile_sizes())) {
     supported_tile_sizes_attr.push_back(
         tt::TileSizeAttr::get(context, it->y(), it->x()));
   }
@@ -274,14 +273,14 @@ mlir::tt::SystemDescAttr::getFromPath(MLIRContext *context, std::string &path) {

   // Acquire chip coordinates
   std::vector<tt::ChipCoordAttr> chip_coordinate_list;
-  for (auto element : *binary_chip_coords) {
+  for (auto const *element : *binary_chip_coords) {
     auto chip_coordinate_attr = tt::ChipCoordAttr::get(
         context, element->rack(), element->shelf(), element->y(), element->x());
     chip_coordinate_list.push_back(chip_coordinate_attr);
   }

   std::vector<tt::ChipChannelAttr> chip_channel_list;
-  for (auto element : *chip_channel_connections) {
+  for (auto const *element : *chip_channel_connections) {
     std::vector<int64_t> ethernet_core_coord0_vec = {
         element->ethernet_core_coord0().y(),
         element->ethernet_core_coord0().x()};
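Note: the auto -> auto const * rewrites satisfy llvm-qualified-auto (enabled via the llvm-* glob above), which asks that auto spell out pointer-ness and constness whenever the deduced type is a pointer. A minimal sketch with hypothetical names (Widget, lookup):

struct Widget {};
static const Widget w;
const Widget *lookup() { return &w; }

void demo() {
  // auto ptr = lookup();     // flagged: deduced type is 'const Widget *'
  auto const *ptr = lookup(); // accepted: pointer and constness are explicit
  (void)ptr;
}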
5 changes: 3 additions & 2 deletions lib/Dialect/TTIR/IR/TTIROpsInterfaces.cpp
@@ -2,10 +2,11 @@
 //
 // SPDX-License-Identifier: Apache-2.0

-#include "ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.h"
-#include <cstdint>

 #include "ttmlir/Dialect/TTIR/IR/TTIR.h"
+#include "ttmlir/Dialect/TTIR/IR/TTIROpsInterfaces.h"
+
+#include <cstdint>
 #include <llvm/ADT/ArrayRef.h>
 #include <mlir/IR/ValueRange.h>

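Note: this reordering follows llvm-include-order, which, roughly, wants the includes in each blank-line-separated block sorted lexicographically. A schematic sketch of what the check flags:

// llvm-include-order: each blank-line-separated block must be sorted.
#include <vector>  // flagged when an earlier-sorting header follows it
#include <string>  // warning: #includes are not sorted properly

#include <cassert> // fine: the blank line above starts a new block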
13 changes: 7 additions & 6 deletions lib/Dialect/TTIR/Transforms/Passes.cpp
@@ -473,14 +473,15 @@ getLegalTensorMemoryLayout(OperandConstraint operandConstraint,
   if (defaultDeviceMemLayout == TensorMemoryLayout::None) {
     return TensorMemoryLayout::None;
   }
+
   if (isSystemMemorySpace(targetMemorySpace)) {
     return TensorMemoryLayout::None;
-  } else {
-    assert(isDeviceMemorySpace(targetMemorySpace));
-    if (bitEnumContainsAny(operandConstraint, memoryLayoutAsOperandConstraint(
-                               defaultDeviceMemLayout))) {
-      return defaultDeviceMemLayout;
-    }
   }
+
+  assert(isDeviceMemorySpace(targetMemorySpace));
+  if (bitEnumContainsAny(operandConstraint, memoryLayoutAsOperandConstraint(
+                             defaultDeviceMemLayout))) {
+    return defaultDeviceMemLayout;
+  }

   std::map<OperandConstraint, TensorMemoryLayout> validLayoutsMap = {
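Note: this is the llvm-else-after-return fix — when the if branch always returns, the else is redundant and the body can be flattened into early returns. The same transformation appears in lib/Dialect/TTNN/IR/TTNNOps.cpp and runtime/lib/ttmetal/runtime.cpp below. A minimal before/after sketch (classify is hypothetical):

// Before: flagged by llvm-else-after-return
int classify(bool isSystem) {
  if (isSystem) {
    return 0;
  } else { // redundant: the if branch already returned
    return 1;
  }
}

// After: flattened to an early return
int classifyFixed(bool isSystem) {
  if (isSystem) {
    return 0;
  }
  return 1;
}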
9 changes: 5 additions & 4 deletions lib/Dialect/TTMetal/Transforms/Passes.cpp
@@ -2,6 +2,8 @@
 //
 // SPDX-License-Identifier: Apache-2.0

+#include <cstdint>
+
 #include "ttmlir/Dialect/TTMetal/Transforms/Passes.h"

 #include "mlir/Analysis/Liveness.h"
@@ -18,8 +20,7 @@
 #include "ttmlir/Dialect/TTIR/IR/TTIROps.h"
 #include "ttmlir/Dialect/TTIR/Transforms/Passes.h"
 #include "llvm/ADT/MapVector.h"
-#include <cstdint>
-#include <llvm/ADT/SmallVector.h>
+#include "llvm/ADT/SmallVector.h"

 #include "ttmlir/Dialect/TT/Utils/PhysicalCoreCoord.h"
 #include "ttmlir/Dialect/TTKernel/IR/TTKernel.h"
@@ -44,7 +45,7 @@ namespace mlir::tt::ttmetal {
 static uint64_t lookupAddress(Value value) {
   auto blockArg = mlir::dyn_cast<BlockArgument>(value);
   if (blockArg) {
-    auto funcOp = blockArg.getOwner()->getParentOp();
+    auto *funcOp = blockArg.getOwner()->getParentOp();
     if (mlir::isa<func::FuncOp>(funcOp)) {
       auto argAlloc = mlir::cast<ArgumentAllocationAttr>(
           mlir::cast<ArrayAttr>(funcOp->getDiscardableAttr(
@@ -528,7 +529,7 @@ class TTIRToTTMetalDispatchRewriter : public OpRewritePattern<ttir::GenericOp> {
     for (BlockArgument operand : blockArguments) {
       auto cbType = mlir::cast<ttkernel::CBType>(operand.getType());
       AffineMap affineIterator = getAffineIterator(cbType.getMemref());
-      auto match = iteratorMaps.find(affineIterator);
+      auto *match = iteratorMaps.find(affineIterator);
       assert(match != iteratorMaps.end());
       blockArgIteratorMapping[operand.getArgNumber()] =
           std::distance(iteratorMaps.begin(), match);
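Note: auto *funcOp and auto *match are more qualified-auto fixes; the check also fires on iterator-returning calls like find when the container's iterator type is a raw pointer, as with SmallVector-backed containers. A sketch (zeroFirst42 is hypothetical):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

void zeroFirst42(llvm::SmallVector<int> &values) {
  auto *it = llvm::find(values, 42); // SmallVector iterators are plain 'int *'
  if (it != values.end()) {
    *it = 0;
  }
}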
6 changes: 4 additions & 2 deletions lib/Dialect/TTNN/IR/TTNNOps.cpp
@@ -42,8 +42,10 @@ ::mlir::LogicalResult mlir::tt::ttnn::ToMemoryConfigOp::verify() {
   if (::mlir::tt::isSystemMemorySpace(outputMemorySpace) &&
       outputMemoryLayout != ::mlir::tt::TensorMemoryLayout::None) {
     return emitOpError("System memory space only supports undef memory layout");
-  } else if (::mlir::tt::isDeviceMemorySpace(outputMemorySpace) &&
-             !isValidDeviceLayout(outputMemoryLayout)) {
+  }
+
+  if (::mlir::tt::isDeviceMemorySpace(outputMemorySpace) &&
+      !isValidDeviceLayout(outputMemoryLayout)) {
     return emitOpError("Device memory space only supports interleaved or "
                        "sharded memory layouts");
   }
1 change: 1 addition & 0 deletions lib/Target/TTNN/TTNNToFlatbuffer.cpp
@@ -3,6 +3,7 @@
 // SPDX-License-Identifier: Apache-2.0

 #include <fstream>
+
 #include <llvm/Support/Casting.h>

 #include "mlir/Dialect/EmitC/IR/EmitC.h"
4 changes: 3 additions & 1 deletion python/Passes.cpp
@@ -28,7 +28,7 @@ void populatePassesModule(py::module &m) {
     mlir::MLIRContext *ctx = unwrap(mlirModuleGetContext(module));
     ctx->appendDialectRegistry(registry);

-    const auto pipeline =
+    const auto *pipeline =
         mlir::PassPipelineInfo::lookup("ttir-to-ttnn-backend-pipeline");

     mlir::function_ref<mlir::LogicalResult(const llvm::Twine &)>
@@ -53,13 +53,15 @@ void populatePassesModule(py::module &m) {
   });

   m.def("ttnn_to_flatbuffer_binary", [](MlirModule module) {
+    // NOLINTBEGIN
     mlir::Operation *moduleOp = unwrap(mlirModuleGetOperation(module));
     std::shared_ptr<void> *binary = new std::shared_ptr<void>();
     *binary = mlir::tt::ttnn::ttnnToFlatbuffer(moduleOp);
     return py::capsule((void *)binary, [](void *data) {
       std::shared_ptr<void> *bin = static_cast<std::shared_ptr<void> *>(data);
       delete bin;
     });
+    // NOLINTEND
   });
 }

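Note: the raw new std::shared_ptr<void> here is deliberate — ownership passes to the py::capsule deleter — so the lambda body is fenced with NOLINT markers to silence checks such as cppcoreguidelines-owning-memory. The general suppression forms clang-tidy accepts: a trailing NOLINT comment (optionally with a named check list) silences one line, and a NOLINTBEGIN/NOLINTEND pair silences a region. A sketch (demo is hypothetical):

void demo() {
  int *one = new int(42); // NOLINT(cppcoreguidelines-owning-memory)
  // NOLINTBEGIN
  int *all = new int(7); // every check is silenced in this region
  // NOLINTEND
  delete one;
  delete all;
}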
2 changes: 1 addition & 1 deletion runtime/include/tt/runtime/runtime.h
@@ -19,7 +19,7 @@ std::pair<SystemDesc, DeviceIds> getCurrentSystemDesc();

 namespace detail {
 void deallocateBuffers(Device device);
-}
+} // namespace detail

 DeviceRuntime getCurrentRuntime();

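Note: this satisfies llvm-namespace-comment, which requires the closing brace of a (long enough) namespace to carry a comment naming it. The expected form:

namespace detail {
void helper();
} // namespace detail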
4 changes: 2 additions & 2 deletions runtime/lib/common/system_desc.cpp
@@ -263,8 +263,8 @@ getCurrentSystemDescImpl(const ::tt::tt_metal::DeviceMesh &deviceMesh) {
 std::pair<::tt::runtime::SystemDesc, DeviceIds> getCurrentSystemDesc() {
   size_t numDevices = ::tt::tt_metal::GetNumAvailableDevices();
   size_t numPciDevices = ::tt::tt_metal::GetNumPCIeDevices();
-  TT_FATAL(numDevices % numPciDevices == 0,
-           "Unexpected non-rectangular grid of devices");
+  assert(numDevices % numPciDevices == 0 &&
+         "Unexpected non-rectangular grid of devices");
   std::vector<chip_id_t> deviceIds(numDevices);
   std::iota(deviceIds.begin(), deviceIds.end(), 0);
   ::tt::tt_metal::DeviceGrid grid =
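Note: TT_FATAL is tt-metal's runtime error macro; swapping it for a plain assert keeps the invariant check in debug builds (presumably avoiding whatever in the macro's expansion clang-tidy objected to), though unlike TT_FATAL the assert compiles away under NDEBUG. The condition && "message" idiom works because a string literal is always truthy, so the message rides along into the assertion's failure output. A sketch (checkGrid is hypothetical):

#include <cassert>
#include <cstddef>

void checkGrid(std::size_t numDevices, std::size_t numPciDevices) {
  // The literal never changes the condition; it only labels the failure.
  assert(numDevices % numPciDevices == 0 &&
         "Unexpected non-rectangular grid of devices");
}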
3 changes: 2 additions & 1 deletion runtime/lib/runtime.cpp
@@ -18,13 +18,15 @@
 namespace tt::runtime {

 namespace detail {
+// NOLINTBEGIN
 #if defined(TT_RUNTIME_ENABLE_TTNN)
 DeviceRuntime globalCurrentRuntime = DeviceRuntime::TTNN;
 #elif defined(TT_RUNTIME_ENABLE_TTMETAL)
 DeviceRuntime globalCurrentRuntime = DeviceRuntime::TTMetal;
 #else
 DeviceRuntime globalCurrentRuntime = DeviceRuntime::Disabled;
 #endif
+// NOLINTEND

 void deallocateBuffers(Device device) {
 #if defined(TT_RUNTIME_ENABLE_TTNN)
@@ -40,7 +42,6 @@ void deallocateBuffers(Device device) {
 #endif
   throw std::runtime_error("runtime is not enabled");
 }
-
 } // namespace detail

 DeviceRuntime getCurrentRuntime() {
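Note: here the markers presumably silence cppcoreguidelines-avoid-non-const-global-variables (enabled in .clang-tidy above) — globalCurrentRuntime is intentionally mutable, build-flag-selected process state. What that check flags, in miniature:

int counter = 0;        // flagged: mutable global state
const int kLimit = 100; // fine: const-qualified global

void bump() { ++counter; }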
15 changes: 9 additions & 6 deletions runtime/lib/ttmetal/runtime.cpp
@@ -100,8 +100,10 @@ prepareInput(::tt::tt_metal::Device *device, MetalTensor const &metalTensor,
     ::tt::tt_metal::EnqueueWriteBuffer(cq, buffer, data, blocking);
     ::tt::tt_metal::EnqueueRecordEvent(cq, event);
     return std::make_pair(buffer, event);
-  } else if (std::holds_alternative<std::shared_ptr<::tt::tt_metal::Buffer>>(
-                 metalTensor)) {
+  }
+
+  if (std::holds_alternative<std::shared_ptr<::tt::tt_metal::Buffer>>(
+          metalTensor)) {
     std::shared_ptr<::tt::tt_metal::Buffer> buffer =
         std::get<std::shared_ptr<::tt::tt_metal::Buffer>>(metalTensor);
     throw std::runtime_error("Input from buffer not supported yet");
@@ -117,10 +119,11 @@ prepareOutput(::tt::tt_metal::Device *device, MetalTensor const *metalTensor,
   if (TensorDesc const *hostTensorDesc = std::get_if<TensorDesc>(metalTensor);
       hostTensorDesc) {
     return createBufferFromTensorRef(device, tensorRef);
-  } else if (std::shared_ptr<::tt::tt_metal::Buffer> const *buffer =
-                 std::get_if<std::shared_ptr<::tt::tt_metal::Buffer>>(
-                     metalTensor);
-             buffer) {
+  }
+
+  if (std::shared_ptr<::tt::tt_metal::Buffer> const *buffer =
+          std::get_if<std::shared_ptr<::tt::tt_metal::Buffer>>(metalTensor);
+      buffer) {
     return *buffer;
   }
   assert(false && "Unsupported tensor type");
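Note: both functions get the same else-after-return flattening; prepareOutput additionally shows the C++17 if-with-init-statement around std::get_if, which keeps each variant alternative's pointer scoped to its own early-returning branch. A self-contained sketch of the pattern (Value and describe are hypothetical):

#include <cassert>
#include <string>
#include <variant>

using Value = std::variant<int, std::string>;

int describe(const Value &v) {
  // Init-statement declares the pointer; the condition tests it for null.
  if (const int *i = std::get_if<int>(&v); i) {
    return *i;
  }

  if (const std::string *s = std::get_if<std::string>(&v); s) {
    return static_cast<int>(s->size());
  }
  assert(false && "Unsupported alternative");
  return 0;
}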