[XLA] Modify comments in ragged all-to-all HLO.
PiperOrigin-RevId: 695425829
Google-ML-Automation committed Nov 11, 2024
1 parent 3707e6a commit 42ea5b4
Showing 2 changed files with 11 additions and 11 deletions.
xla/hlo/ir/hlo_instruction.h (4 changes: 2 additions & 2 deletions)
@@ -1067,10 +1067,10 @@ class HloInstruction {
// The ragged all-to-all HLO has the following arguments:
// input: ragged input data tensor.
// input_offsets: ragged input offsets tensor.
- // input_sizes: ragged input sizes tensor.
+ // send_sizes: ragged send sizes tensor.
// output: ragged output data tensor.
// output_offsets: ragged output offsets tensor.
- // output_sizes: ragged output sizes tensor.
+ // recv_sizes: ragged recv sizes tensor.
//
// The '*_offsets' and '*_sizes' tensors must have the same shape.
// The output buffer is passed in as an input (and aliased in the output),
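For readers skimming the diff, the standalone sketch below shows the renamed operand list in context: it feeds the same ragged-all-to-all HLO text that the updated parser tests below use through the HLO text parser and prints the round-tripped module. It is an illustration, not part of this change; the include path xla/hlo/parser/hlo_parser.h, the ParseAndReturnUnverifiedModule entry point, and the HloModule name are assumptions based on the file layout in this commit, not something the commit itself states.

// Standalone sketch (not part of this change): parses a ragged-all-to-all
// module that uses the renamed send_sizes/recv_sizes operands, much as
// hlo_parser_test.cc below does through its test fixture.
#include <iostream>
#include <memory>

#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/parser/hlo_parser.h"  // assumed header path for the parser

int main() {
  // HLO text mirroring the first updated test case below; the HloModule
  // header line is added here only to make the snippet a standalone module.
  const char kHlo[] = R"(
HloModule RaggedAllToAll

ENTRY AllToAll {
  input = bf16[1024,256]{1,0} parameter(0)
  output = bf16[1024,256]{1,0} parameter(1)
  input_offsets = s32[8]{0} parameter(2)
  send_sizes = s32[8]{0} parameter(3)
  output_offsets = s32[8]{0} parameter(4)
  recv_sizes = s32[8]{0} parameter(5)
  ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, send_sizes, output_offsets, recv_sizes), replica_groups={{0,1,2,3,4,5,6,7}}
}
)";

  auto module = xla::ParseAndReturnUnverifiedModule(kHlo);
  if (!module.ok()) {
    std::cerr << "Parse error: " << module.status() << "\n";
    return 1;
  }
  // Round-trip back to text; the operand order matches the updated comment:
  // (input, output, input_offsets, send_sizes, output_offsets, recv_sizes).
  std::cout << (*module)->ToString() << std::endl;
  return 0;
}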
xla/hlo/parser/hlo_parser_test.cc (18 changes: 9 additions & 9 deletions)
@@ -2193,10 +2193,10 @@ ENTRY AllToAll {
input = bf16[1024,256]{1,0} parameter(0)
output = bf16[1024,256]{1,0} parameter(1)
input_offsets = s32[8]{0} parameter(2)
- input_sizes = s32[8]{0} parameter(3)
+ send_sizes = s32[8]{0} parameter(3)
output_offsets = s32[8]{0} parameter(4)
- output_sizes = s32[8]{0} parameter(5)
- ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, input_sizes, output_offsets, output_sizes), replica_groups={{0,1,2,3,4,5,6,7}}
+ recv_sizes = s32[8]{0} parameter(5)
+ ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, send_sizes, output_offsets, recv_sizes), replica_groups={{0,1,2,3,4,5,6,7}}
}
)",
@@ -2211,10 +2211,10 @@ ENTRY AllToAll {
input = bf16[1024,256]{1,0} parameter(0)
output = bf16[1024,256]{1,0} parameter(1)
input_offsets = s32[8]{0} parameter(2)
- input_sizes = s32[8]{0} parameter(3)
+ send_sizes = s32[8]{0} parameter(3)
output_offsets = s32[8]{0} parameter(4)
- output_sizes = s32[8]{0} parameter(5)
- ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, input_sizes, output_offsets, output_sizes), replica_groups=[2,4]<=[4,2]T(1,0)
+ recv_sizes = s32[8]{0} parameter(5)
+ ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, send_sizes, output_offsets, recv_sizes), replica_groups=[2,4]<=[4,2]T(1,0)
}
)",
@@ -2229,10 +2229,10 @@ ENTRY AllToAll {
input = bf16[1024,256]{1,0} parameter(0)
output = bf16[1024,256]{1,0} parameter(1)
input_offsets = s32[8]{0} parameter(2)
- input_sizes = s32[8]{0} parameter(3)
+ send_sizes = s32[8]{0} parameter(3)
output_offsets = s32[8]{0} parameter(4)
- output_sizes = s32[8]{0} parameter(5)
- ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, input_sizes, output_offsets, output_sizes), replica_groups={}
+ recv_sizes = s32[8]{0} parameter(5)
+ ROOT ra2a = bf16[1024,256]{1,0} ragged-all-to-all(input, output, input_offsets, send_sizes, output_offsets, recv_sizes), replica_groups={}
}
)"
