Refresh clang-format
After noticing a number of commits carrying unrelated formatting changes,
I suspect the clang-format version or configuration changed at some point,
leaving parts of the tree stale relative to the current style. Refreshing
the formatting across the whole tree should keep that noise out of future diffs.

This can be reproduced with
```
find lib -iname '*.h' -o -iname '*.cpp' | xargs clang-format -i --style=llvm
find include -iname '*.h' -o -iname '*.cpp' | xargs clang-format -i --style=llvm
find projects -iname '*.h' -o -iname '*.cpp' | xargs clang-format -i --style=llvm
```
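
For larger sweeps, a slightly hardened variant of the same commands may be worth using. This is a sketch, not part of the original commit: it assumes GNU find/xargs and clang-format on PATH, quotes the globs so the shell cannot expand them prematurely, groups the `-iname` alternatives explicitly, and uses `-print0`/`-0` so paths containing spaces survive:
```
# Same clang-format refresh, hardened: escaped parentheses group the
# -iname alternatives, and -print0 | xargs -0 keeps unusual paths intact.
for dir in lib include projects; do
  find "$dir" \( -iname '*.h' -o -iname '*.cpp' \) -print0 \
    | xargs -0 clang-format -i --style=llvm
done
```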
qedawkins committed Jan 29, 2024
1 parent d3fd754 commit 9a4741a
Showing 81 changed files with 1,952 additions and 1,797 deletions.
2 changes: 1 addition & 1 deletion include/torch-mlir-c/Dialects.h
@@ -22,4 +22,4 @@ MLIR_DECLARE_CAPI_DIALECT_REGISTRATION(Torch, torch);
}
#endif

#endif // TORCHMLIR_C_DIALECTS_H
#endif // TORCHMLIR_C_DIALECTS_H
@@ -10,9 +10,9 @@
#ifndef TORCH_MLIR_DIALECTS_DIALECT_TMTENSOR_IR_TMTENSORINTERFACES_H_
#define TORCH_MLIR_DIALECTS_DIALECT_TMTENSOR_IR_TMTENSORINTERFACES_H_

#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Support/LLVM.h"

7 changes: 4 additions & 3 deletions include/torch-mlir/Conversion/TorchOnnxToTorch/Patterns.h
@@ -78,8 +78,8 @@ struct OpBinder {
return failure();
return success();
}
ParseResult tensorOperandsList( llvm::SmallVectorImpl<Value> &values) {

ParseResult tensorOperandsList(llvm::SmallVectorImpl<Value> &values) {
for (uint32_t i = 0; i < op->getNumOperands(); i++) {
values.push_back(op->getOperand(i));
}
@@ -97,7 +97,8 @@ struct OpBinder {
return success();
}

ParseResult tensorResultTypeAtIndex(Torch::ValueTensorType &typeIdx, int64_t idx) {
ParseResult tensorResultTypeAtIndex(Torch::ValueTensorType &typeIdx,
int64_t idx) {
if (idx >= op->getNumResults())
return failure();
auto t = toValidTensorType(op->getResult(idx).getType());
20 changes: 9 additions & 11 deletions include/torch-mlir/Conversion/TorchToTosa/TosaLegalizeCommon.h
@@ -37,33 +37,31 @@ TosaOpT createBinaryOpAndCast(PatternRewriter &rewriter, Operation *op,
return CreateOpAndInfer<TosaOpT>(rewriter, op->getLoc(), outType, lhs, rhs);
}

// This specialization is for Div op. Unlike other binary ops, it doesn't support
// floating type.
// This specialization is for Div op. Unlike other binary ops, it doesn't
// support floating type.
template <>
tosa::DivOp createBinaryOpAndCast<DivOp>(PatternRewriter &rewriter,
Operation *op, TensorType outType,
Value lhs, Value rhs);

std::optional<Value> convertTorchIndexToTfIndices(PatternRewriter &rewriter,
Operation *op,
Value params_value,
Value index_value,
int32_t axis);
Operation *op,
Value params_value,
Value index_value,
int32_t axis);

// Lowers torch.aten.Gather operators to a sequence of TOSA ops.
// Revised from
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/compiler/mlir/tosa/transforms/legalize_common.cc
std::optional<Value> convertGatherNdOp(PatternRewriter &rewriter,
Operation *op, Type out_type,
Value params_value,
Value indices_value);
std::optional<Value> convertGatherNdOp(PatternRewriter &rewriter, Operation *op,
Type out_type, Value params_value,
Value indices_value);

std::optional<Value> convertScatterNdOp(PatternRewriter &rewriter,
Operation *op, Type outType,
Value paramsValue, Value indicesValue,
Value fillValues);


// Lowers ReduceAll to a sequence of TOSA ops.
std::optional<Value>
convertReduceAllOp(PatternRewriter &rewriter, Operation *op,
6 changes: 3 additions & 3 deletions include/torch-mlir/Conversion/TorchToTosa/TosaLegalizeUtils.h
@@ -67,7 +67,7 @@ Value promoteType(PatternRewriter &rewriter, Value input, TensorType outType);
// op. This allows shape inference during the framework to TOSA lowering.
template <typename TosaOp, typename... Args>
TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,
Args &&... args) {
Args &&...args) {
auto op = rewriter.create<TosaOp>(loc, result_ty, args...);

InferShapedTypeOpInterface shapeInterface =
@@ -111,15 +111,15 @@ TosaOp CreateOpAndInfer(PatternRewriter &rewriter, Location loc, Type result_ty,

template <typename TosaOp, typename... Args>
void CreateReplaceOpAndInfer(PatternRewriter &rewriter, Operation *op,
Type result_ty, Args &&... args) {
Type result_ty, Args &&...args) {
auto result =
CreateOpAndInfer<TosaOp>(rewriter, op->getLoc(), result_ty, args...);
rewriter.replaceOp(op, result->getResults());
}

// Get accumulator type for AvgPool2dOp.
LogicalResult getAvgPool2dAccType(PatternRewriter &rewriter, Value input,
TypeAttr &accType);
TypeAttr &accType);

} // namespace tosa
} // namespace mlir
6 changes: 3 additions & 3 deletions include/torch-mlir/Dialect/Torch/IR/TorchTraits.h
@@ -36,8 +36,7 @@ class HasValueSemantics
// This is a weaker form of HasValueSemantics, since that trait also requires no
// aliasing. That is, HasValueSemantics implies this trait.
template <typename ConcreteType>
class ReadOnly
: public ::mlir::OpTrait::TraitBase<ConcreteType, ReadOnly> {};
class ReadOnly : public ::mlir::OpTrait::TraitBase<ConcreteType, ReadOnly> {};

// If a Torch op has this trait, it means that the op is a "trailing underscore"
// op variant that performs an in-place operation on its first argument. These
@@ -62,7 +61,8 @@ class AllowsTypeRefinement
// by the IValue importer.
template <typename ConcreteType>
class AllowedInModuleInitializer
: public ::mlir::OpTrait::TraitBase<ConcreteType, AllowedInModuleInitializer> {};
: public ::mlir::OpTrait::TraitBase<ConcreteType,
AllowedInModuleInitializer> {};

} // namespace OpTrait
} // namespace Torch
6 changes: 3 additions & 3 deletions include/torch-mlir/Dialect/Torch/Transforms/Passes.h
@@ -61,7 +61,8 @@ struct TorchLoweringPipelineOptions

Option<std::string> extraLibrary{
*this, "extra-library",
llvm::cl::desc("Filename of MLIR module for splicing into the abstract interpretation library.")};
llvm::cl::desc("Filename of MLIR module for splicing into the abstract "
"interpretation library.")};
};

/// Creates a pipeline that lowers the object graph IR that is produced by
@@ -125,8 +126,7 @@ createSimplifyDtypeCalculationsPass();
std::unique_ptr<OperationPass<func::FuncOp>>
createDropAbstractInterpCalculationsPass();

std::unique_ptr<OperationPass<ModuleOp>>
createEraseModuleInitializerPass();
std::unique_ptr<OperationPass<ModuleOp>> createEraseModuleInitializerPass();

std::unique_ptr<OperationPass<ModuleOp>>
createLowerToBackendContractPass(int maxIterations, bool decompose,
7 changes: 1 addition & 6 deletions include/torch-mlir/Dialect/Torch/Utils/TorchUpstream.h
@@ -140,12 +140,7 @@ enum Reduction { None, Mean, Sum, END };
// Source:
// https://github.com/pytorch/pytorch/blob/master/c10/core/MemoryFormat.h
//===----------------------------------------------------------------------===//
enum MemoryFormat {
Contiguous,
Preserve,
ChannelsLast,
ChannelsLast3d
};
enum MemoryFormat { Contiguous, Preserve, ChannelsLast, ChannelsLast3d };

//===----------------------------------------------------------------------===//
// Possible values for `layout` argument in PyTorch ops that support it.
3 changes: 1 addition & 2 deletions include/torch-mlir/Dialect/Torch/Utils/Utils.h
@@ -121,8 +121,7 @@ LogicalResult checkDefaultStrideHelper(Operation *op, PatternRewriter &rewriter,
// Helper to create a tensor filled with the given scalar. Scalar would be
// converted the to the element type of the given tensor type.
Value createInitTensor(PatternRewriter &rewriter, Location loc,
BaseTensorType resultType, Value scalar,
Value sizeList);
BaseTensorType resultType, Value scalar, Value sizeList);

// Helper to create a rank 0 tensor filled with the given `scalar`. `scalar`
// would be converted to the element type of the given `inputType`.
5 changes: 3 additions & 2 deletions lib/CAPI/Dialects.cpp
@@ -9,7 +9,8 @@

#include "torch-mlir-c/Dialects.h"

#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
#include "mlir/CAPI/Registration.h"
#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"

MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(Torch, torch, mlir::torch::Torch::TorchDialect)
MLIR_DEFINE_CAPI_DIALECT_REGISTRATION(Torch, torch,
mlir::torch::Torch::TorchDialect)
4 changes: 1 addition & 3 deletions lib/Conversion/Passes.cpp
@@ -30,6 +30,4 @@ namespace {
#include "torch-mlir/Conversion/Passes.h.inc"
} // end namespace

void mlir::torch::registerConversionPasses() {
::registerPasses();
}
void mlir::torch::registerConversionPasses() { ::registerPasses(); }
@@ -82,7 +82,8 @@ class ConvertGetNextSeedOp : public OpConversionPattern<GetNextSeedOp> {
// temp = multiplier * currentSeed + incrementStep
Value mul = rewriter.create<arith::MulIOp>(loc, currentSeed, multiplier);
Value seed = rewriter.create<arith::AddIOp>(loc, mul, incrementStep);
globalVar = rewriter.create<tensor::InsertOp>(loc, seed, globalVar, ValueRange());
globalVar =
rewriter.create<tensor::InsertOp>(loc, seed, globalVar, ValueRange());
rewriter.create<ml_program::GlobalStoreOp>(
loc, SymbolRefAttr::get(op->getContext(), getSeedGobalVarName()),
globalVar);
105 changes: 53 additions & 52 deletions lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
@@ -29,7 +29,8 @@ using namespace mlir::torch::onnx_c;
// thing here, so we simplify.
void mlir::torch::onnx_c::populateDefaultDomainGtoP(
OnnxCustomOpConversionPattern &patterns) {
patterns.onOp("HardSigmoid", 6,
patterns.onOp(
"HardSigmoid", 6,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
Value tensorOperand;
@@ -39,8 +40,9 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
binder.f32FloatAttr(beta, "beta", 0.5f) ||
binder.tensorResultType(resultType))
return failure();

// HardSigmoid computes the following expression: max(0, min(1, alpha * x + beta))

// HardSigmoid computes the following expression: max(0, min(1, alpha *
// x + beta))
Value constAlpha = rewriter.create<Torch::ConstantFloatOp>(
binder.getLoc(), rewriter.getType<Torch::FloatType>(),
rewriter.getF64FloatAttr(alpha));
@@ -51,7 +53,8 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(

// Expression: alpha * x + beta
Value alpha_x_plus_beta = rewriter.create<Torch::AtenAddScalarOp>(
binder.getLoc(), resultType, tensorOperand, constBeta, /*alpha=*/constAlpha);
binder.getLoc(), resultType, tensorOperand, constBeta,
/*alpha=*/constAlpha);

// Expression: min(1, alpha * x + beta)
Value constantOne = rewriter.create<Torch::ConstantIntOp>(
@@ -100,7 +103,7 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
rewriter.replaceOpWithNewOp<Torch::AtenLtTensorOp>(
binder.op, resultType, lhs, rhs);
return success();
});
});
patterns.onOp("LessOrEqual", 1,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
Expand All @@ -109,9 +112,9 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
binder.tensorResultType(resultType)) {
return failure();
}
rewriter.replaceOpWithNewOp<Torch::AtenLeTensorOp>(
rewriter.replaceOpWithNewOp<Torch::AtenLeTensorOp>(
binder.op, resultType, lhs, rhs);
return success();
return success();
});
patterns.onOp("Log", 1,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
@@ -126,7 +129,7 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
return success();
});
patterns.onOp("MatMul", 13,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
Value lhs, rhs;
if (binder.tensorOperands(lhs, rhs) ||
@@ -206,20 +209,20 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
return success();
});
patterns.onOp("Mul", 7,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
Value lhs, rhs;
if (binder.tensorOperands(lhs, rhs) ||
binder.tensorResultType(resultType)) {
return failure();
}
rewriter.replaceOpWithNewOp<Torch::AtenMulTensorOp>(
binder.op, resultType, lhs, rhs);
return success();
});
binder.op, resultType, lhs, rhs);
return success();
});
patterns.onOp("NonZero", 13,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
Value operand;
if (binder.tensorOperand(operand) ||
binder.tensorResultType(resultType)) {
@@ -332,41 +335,38 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
binder.op, resultType, lhs, rhs);
return success();
});
patterns.onOp("Max", 1,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
llvm::SmallVector<Value> operands;
if (binder.tensorOperandsList(operands) ||
binder.tensorResultType(resultType) ||
operands.size() == 0) {
return failure();
}
Value result = operands[0];
for (uint64_t i = 1; i < operands.size(); i++) {
result = rewriter.create<Torch::AtenMaximumOp>(
binder.getLoc(), resultType, result, operands[i]);
}
rewriter.replaceOp(binder.op, result.getDefiningOp());
return success();
});
patterns.onOp("Min", 1,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
llvm::SmallVector<Value> operands;
if (binder.tensorOperandsList(operands) ||
binder.tensorResultType(resultType) ||
operands.size() == 0) {
return failure();
}
Value result = operands[0];
for (uint64_t i = 1; i < operands.size(); i++) {
result = rewriter.create<Torch::AtenMinimumOp>(
binder.getLoc(), resultType, result, operands[i]);
}
rewriter.replaceOp(
binder.op, result.getDefiningOp());
return success();
});
patterns.onOp(
"Max", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
llvm::SmallVector<Value> operands;
if (binder.tensorOperandsList(operands) ||
binder.tensorResultType(resultType) || operands.size() == 0) {
return failure();
}
Value result = operands[0];
for (uint64_t i = 1; i < operands.size(); i++) {
result = rewriter.create<Torch::AtenMaximumOp>(
binder.getLoc(), resultType, result, operands[i]);
}
rewriter.replaceOp(binder.op, result.getDefiningOp());
return success();
});
patterns.onOp(
"Min", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
llvm::SmallVector<Value> operands;
if (binder.tensorOperandsList(operands) ||
binder.tensorResultType(resultType) || operands.size() == 0) {
return failure();
}
Value result = operands[0];
for (uint64_t i = 1; i < operands.size(); i++) {
result = rewriter.create<Torch::AtenMinimumOp>(
binder.getLoc(), resultType, result, operands[i]);
}
rewriter.replaceOp(binder.op, result.getDefiningOp());
return success();
});
patterns.onOp("Neg", 1,
[](OpBinder binder, ConversionPatternRewriter &rewriter) {
Torch::ValueTensorType resultType;
@@ -693,7 +693,8 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
binder.getLoc(),
Torch::ListType::get(Torch::IntType::get(binder.op->getContext())),
cstStrides);
Value cstFalse = rewriter.create<Torch::ConstantBoolOp>(binder.getLoc(), false);
Value cstFalse =
rewriter.create<Torch::ConstantBoolOp>(binder.getLoc(), false);
Value cstCeilMode = cstFalse;
Value cstCountIncludePad = cstFalse;
Value cstNone = rewriter.create<Torch::ConstantNoneOp>(binder.getLoc());
@@ -903,7 +904,7 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
return failure();
}
rewriter.replaceOpWithNewOp<Torch::AtenPowTensorTensorOp>(
binder.op, resultType, lhs, rhs);
binder.op, resultType, lhs, rhs);
return success();
});
patterns.onOp(