
Commit 89dcfe6

ghpvnist authored and TensorFlow MLIR Team committed
Integrate StableHLO at openxla/stablehlo@271e8634
PiperOrigin-RevId: 620069321
1 parent 1185e20 commit 89dcfe6

File tree: 8 files changed, +6 -167 lines changed


stablehlo/WORKSPACE.bazel (+2 -2)
@@ -17,9 +17,9 @@ workspace(name = "stablehlo")

 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

-LLVM_COMMIT = "7ac7d418ac2b16fd44789dcf48e2b5d73de3e715"
+LLVM_COMMIT = "3cf169ca160eaf5464503fbd93d73ee1d8597936"

-LLVM_SHA256 = "8b99a146881fbb2a2d8e812724550b2c88fed4403dfb4e133ee8b7107a6a9348"
+LLVM_SHA256 = "b63cac687df1bc98e3eb0289f3be6824fcb1b106d0720b5c083417918d1029fd"

 http_archive(
     name = "llvm-raw",
stablehlo/build_tools/github_actions/lint_whitespace_checks.sh (+1 -1)
@@ -40,7 +40,7 @@ if [[ $# -ne 0 ]] ; then
 fi

 echo "Gathering changed files..."
-mapfile -t CHANGED_FILES < <(git diff "$BASE_BRANCH" HEAD --name-only --diff-filter=d | grep '.*\.cpp$\|.*\.h$\|.*\.md$\|.*\.mlir$\|.*\.sh$\|.*\.td$\|.*\.txt$\|.*\.yml$\|.*\.yaml$')
+mapfile -t CHANGED_FILES < <(git diff "$BASE_BRANCH" HEAD --name-only --diff-filter=d | grep -Ev '.*\.(bc|png|svg)$')
 if (( ${#CHANGED_FILES[@]} == 0 )); then
   echo "No files to check."
   exit 0
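The filter flips from an allow-list of source extensions to a deny-list of binary and image extensions, so new file types are now linted by default. A small sketch of the difference, using a hypothetical file list (only the two grep expressions come from the script):

    # Hypothetical changed-file list for illustration.
    printf '%s\n' README.md docs/logo.svg tests/foo.mlir data/model.bc BUILD.bazel > /tmp/files.txt

    # Old allow-list: BUILD.bazel is skipped because .bazel is not among the listed extensions.
    grep '.*\.cpp$\|.*\.h$\|.*\.md$\|.*\.mlir$\|.*\.sh$\|.*\.td$\|.*\.txt$\|.*\.yml$\|.*\.yaml$' /tmp/files.txt

    # New deny-list: only .bc, .png, and .svg files are excluded, so BUILD.bazel is now checked.
    grep -Ev '.*\.(bc|png|svg)$' /tmp/files.txt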

stablehlo/examples/c++/BUILD.bazel (+2 -2)
@@ -19,7 +19,7 @@ package(
 )

 cc_binary(
-    name = "example-add",
+    name = "example_add",
     srcs = [
         "ExampleAdd.cpp",
     ],
@@ -33,7 +33,7 @@ cc_binary(
 )

 cc_test(
-    name = "example-add-test",
+    name = "example_add_test",
     srcs = [
         "ExampleAdd.cpp",
     ],
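The rename swaps dashes for underscores in the target names, which changes the labels used to build them. A hedged invocation sketch, assuming the targets live in an //examples/c++ package of the stablehlo workspace (the label prefix is inferred from the BUILD file's path, not stated in the diff):

    # Hypothetical invocations; only the target names come from the diff above.
    bazel build //examples/c++:example_add
    bazel test //examples/c++:example_add_test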

stablehlo/stablehlo/dialect/ChloOps.cpp (-35)
@@ -412,41 +412,6 @@ LogicalResult BroadcastSelectOp::reifyReturnTypeShapes(
   return success();
 }

-//===----------------------------------------------------------------------===//
-// RankSpecializationClusterOp
-//===----------------------------------------------------------------------===//
-
-void RankSpecializationClusterOp::getSuccessorRegions(
-    RegionBranchPoint point, SmallVectorImpl<RegionSuccessor>& regions) {
-  // RankSpecializationClusterOp has unconditional control flows into the region
-  // and back to the parent, so return the correct RegionSuccessor purely based
-  // on the index being None or 0.
-  if (!point.isParent()) {
-    regions.push_back(RegionSuccessor(getResults()));
-    return;
-  }
-  regions.push_back(RegionSuccessor(&getBody()));
-}
-
-LogicalResult RankSpecializationClusterOp::verify() {
-  Block* body = SingleBlock::getBody();
-  if (body->getArgumentTypes() != getOperandTypes())
-    return emitOpError() << "block argument types must match operand types";
-
-  // All operands of nested ops must be defined in the body or declared by the
-  // cluster.
-  for (Operation& nested : body->without_terminator()) {
-    if (!llvm::all_of(nested.getOpOperands(), [&](OpOperand& operand) {
-          Operation* def = operand.get().getDefiningOp();
-          if (def != nullptr && def->getBlock() == body) return true;
-          return llvm::is_contained(body->getArguments(), operand.get());
-        }))
-      return emitOpError() << "nested ops must not depend on implicit operands";
-  }
-
-  return success();
-}
-
 //===----------------------------------------------------------------------===//
 // TopKOp
 //===----------------------------------------------------------------------===//

stablehlo/stablehlo/dialect/ChloOps.td (-53)
@@ -882,59 +882,6 @@ def CHLO_MinimumBroadcastShapesOp :
   let hasVerifier = 1;
 }

-def CHLO_RankSpecializationClusterOp
-    : CHLO_Op<"rank_specialization_cluster", [
-      DeclareOpInterfaceMethods<RegionBranchOpInterface>,
-      SingleBlockImplicitTerminator<"RankSpecializationClusterYieldOp">,
-      RecursiveMemoryEffects]> {
-
-  let summary = "Cluster of operations that will be rank-specialized together.";
-
-  let description = [{
-    Groups compatible element-wise operatons together so that they can be
-    rank-specialized together. The operation takes and yields a variadic number
-    of (unranked) tensor operands. Its body region holds one block with one
-    block argument per input tensor of the same type. All operations in this
-    block must only operate on these block arguments. Results are returned
-    through the `rank_specialization_cluster_yield` operation.
-
-    Example:
-
-    ```
-    %0 = "chlo.rank_specialization_cluster"(%arg0, %arg1, %arg2) ({
-    ^bb0(%arg0_ : tensor<*xf32>, %arg1_ : tensor<*xf32>, %arg2_ : tensor<*xf32>):
-      %1 = chlo.broadcast_multiply %arg0_, %arg1_
-          : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-      %2 = chlo.broadcast_add %1, %arg2_
-          : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-      "chlo.rank_specialization_cluster_yield"(%2) : (tensor<*xf32>) -> ()
-    }) : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-    ```
-  }];
-
-  let arguments = (ins Variadic<HLO_AnyTensor>);
-  let results = (outs Variadic<HLO_AnyTensor>);
-  let regions = (region SizedRegion<1>:$body);
-
-  let hasVerifier = 1;
-}
-
-def CHLO_RankSpecializationClusterYieldOp
-    : CHLO_Op<"rank_specialization_cluster_yield", [Pure,
-      ReturnLike, Terminator, HasParent<"RankSpecializationClusterOp">]> {
-
-  let summary = "Yield operation for `rank_specialization_cluster`";
-  let description = [{
-    This operation yields the results from within the
-    `chlo.rank_specialization_cluster` operation's region. The operation takes
-    an arbitrary number of operands and produces no results. The operand number
-    and types must match the number and types of the parent
-    `rank_specialization_cluster` operation's results.
-  }];
-
-  let arguments = (ins Variadic<HLO_AnyTensor>:$results);
-}
-
 def CHLO_DynamicReshapeOp: CHLO_Op<"dynamic_reshape", [Pure,
     DeclareOpInterfaceMethods<InferShapedTypeOpInterface>]> {
   let summary = "Reshape a tensor to a given, possibly dynamic, shape.";

stablehlo/stablehlo/dialect/Version.h (+1 -1)
@@ -38,7 +38,7 @@ class Version {
   static FailureOr<Version> fromString(llvm::StringRef versionRef);

   /// Return a Version representing the current VHLO dialect version.
-  static Version getCurrentVersion() { return Version(0, 19, 3); }
+  static Version getCurrentVersion() { return Version(0, 19, 4); }

   /// Return a Version representing the minimum supported VHLO dialect version.
   static Version getMinimumVersion() { return Version(0, 9, 0); }

stablehlo/stablehlo/tests/ops_chlo.mlir (-55)
@@ -100,61 +100,6 @@ func.func @minimum_broadcast_shapes_one_operand(%arg: tensor<?xindex>) {

 // -----

-func.func @rank_specialization_cluster(%arg0 : tensor<*xf32>, %arg1 : tensor<*xf32>,
-    %arg2 : tensor<*xf32>) -> tensor<*xf32> {
-  %0 = "chlo.rank_specialization_cluster"(%arg0, %arg1, %arg2) ({
-  ^bb0(%arg0_ : tensor<*xf32>, %arg1_ : tensor<*xf32>, %arg2_ : tensor<*xf32>):
-    %1 = chlo.broadcast_multiply %arg0_, %arg1_
-        : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-    %2 = chlo.broadcast_add %1, %arg2_
-        : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-    "chlo.rank_specialization_cluster_yield"(%2) : (tensor<*xf32>) -> ()
-  }) : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-  func.return %0 : tensor<*xf32>
-}
-
-// -----
-
-func.func @rank_specialization_cluster(%arg0 : tensor<*xf32>,
-    %arg1 : tensor<*xf32>) -> tensor<*xf32> {
-  // expected-error @+1{{source has 2 operands, but target successor needs 1}}
-  %0 = "chlo.rank_specialization_cluster"(%arg0, %arg1) ({
-  ^bb0(%arg0_ : tensor<*xf32>, %arg1_ : tensor<*xf32>):
-    "chlo.rank_specialization_cluster_yield"(%arg0_, %arg1_)
-        : (tensor<*xf32>, tensor<*xf32>) -> ()
-  }) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-  func.return %0 : tensor<*xf32>
-}
-
-// -----
-
-func.func @rank_specialization_cluster(%arg0 : tensor<*xf32>) -> tensor<*xf32> {
-  // expected-error @+1{{block argument types must match operand types}}
-  %0 = "chlo.rank_specialization_cluster"(%arg0) ({
-  ^bb0(%arg0_ : tensor<*xf32>, %arg1_ : tensor<*xf32>):
-    "chlo.rank_specialization_cluster_yield"(%arg0_) : (tensor<*xf32>) -> ()
-  }) : (tensor<*xf32>) -> tensor<*xf32>
-  func.return %0 : tensor<*xf32>
-}
-
-// -----
-
-func.func @rank_specialization_cluster(%arg0 : tensor<*xf32>, %arg1 : tensor<*xf32>,
-    %arg2 : tensor<*xf32>) -> tensor<*xf32> {
-  // expected-error @+1{{nested ops must not depend on implicit operands}}
-  %0 = "chlo.rank_specialization_cluster"(%arg0, %arg1, %arg2) ({
-  ^bb0(%arg0_ : tensor<*xf32>, %arg1_ : tensor<*xf32>, %arg2_ : tensor<*xf32>):
-    %1 = chlo.broadcast_multiply %arg0_, %arg1_
-        : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-    %2 = chlo.broadcast_add %1, %arg2
-        : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-    "chlo.rank_specialization_cluster_yield"(%2) : (tensor<*xf32>) -> ()
-  }) : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-  func.return %0 : tensor<*xf32>
-}
-
-// -----
-
 func.func @top_k(%arg0 : tensor<f32>) {
   // expected-error @+2 {{failed to infer returned types}}
   // @expected-error @+1{{operand's rank must be at least 1}}

stablehlo/stablehlo/tests/ops_chlo_roundtrip.mlir (-18)
@@ -417,24 +417,6 @@ func.func @chlo_reshape_dynamic(%arg0: tensor<?xf32>, %arg1: tensor<2xi32>) -> t
   func.return %0 : tensor<?x?xf32>
 }

-// CHECK-LABEL: func @chlo_rank_specialization_cluster
-// CHECK-SAME:  %[[A0:.*0]]: tensor<*xf32>,
-// CHECK-SAME:  %[[A1:.*1]]: tensor<*xf32>,
-// CHECK-SAME:  %[[A2:.*2]]: tensor<*xf32>)
-// CHECK-NEXT:  %[[T:.*]] = "chlo.rank_specialization_cluster"(%[[A0]], %[[A1]], %[[A2]])
-// CHECK:       ^bb0(%[[A3:.*]]: tensor<*xf32>, %[[A4:.*]]: tensor<*xf32>, %[[A5:.*]]: tensor<*xf32>):
-// CHECK:         "chlo.rank_specialization_cluster_yield"(%[[A3]]) : (tensor<*xf32>) -> ()
-// CHECK:       }) : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-// CHECK:       return %[[T]] : tensor<*xf32>
-func.func @chlo_rank_specialization_cluster(%arg0 : tensor<*xf32>, %arg1 : tensor<*xf32>,
-    %arg2 : tensor<*xf32>) -> tensor<*xf32> {
-  %0 = "chlo.rank_specialization_cluster"(%arg0, %arg1, %arg2) ({
-  ^bb0(%arg0_ : tensor<*xf32>, %arg1_ : tensor<*xf32>, %arg2_ : tensor<*xf32>):
-    "chlo.rank_specialization_cluster_yield"(%arg0_) : (tensor<*xf32>) -> ()
-  }) : (tensor<*xf32>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
-  func.return %0 : tensor<*xf32>
-}
-
 // CHECK-LABEL: func @chlo_erf_inv
 // CHECK-SAME:  %[[A0:.*0]]: tensor<16x16xf32>)
 // CHECK:       chlo.erf_inv %[[A0]] : tensor<16x16xf32> -> tensor<16x16xf32>
