about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Kuy Mainwaring <kuym@google.com>  2024-05-09 15:49:59 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2024-05-09 16:46:30 -0700
commit    090df53ad909ebcd5f7c85803146564f89d66cd7 (patch)
tree      9bef4d80303b95dc41b3de918a1c5e957378769a
parent    863ad12e06ae2f625494377a132a24da912cc620 (diff)
download  tensorflow-090df53ad909ebcd5f7c85803146564f89d66cd7.tar.gz
[XLA:GPU] Clang-tidy fixes for xla/service/gpu/gpu_schedule_postprocessing.cc
PiperOrigin-RevId: 632293724
-rw-r--r--  third_party/xla/xla/service/gpu/BUILD                               1
-rw-r--r--  third_party/xla/xla/service/gpu/gpu_schedule_postprocessing.cc      5
2 files changed, 3 insertions, 3 deletions
diff --git a/third_party/xla/xla/service/gpu/BUILD b/third_party/xla/xla/service/gpu/BUILD
index 9aac482ee1d..63c17212b35 100644
--- a/third_party/xla/xla/service/gpu/BUILD
+++ b/third_party/xla/xla/service/gpu/BUILD
@@ -4186,7 +4186,6 @@ cc_library(
hdrs = ["gpu_schedule_postprocessing.h"],
deps = [
":backend_configs_cc",
- "//xla:statusor",
"//xla/hlo/ir:hlo",
"//xla/hlo/utils:hlo_query",
"//xla/service:hlo_pass",
diff --git a/third_party/xla/xla/service/gpu/gpu_schedule_postprocessing.cc b/third_party/xla/xla/service/gpu/gpu_schedule_postprocessing.cc
index e71232c0a2d..a0af7981186 100644
--- a/third_party/xla/xla/service/gpu/gpu_schedule_postprocessing.cc
+++ b/third_party/xla/xla/service/gpu/gpu_schedule_postprocessing.cc
@@ -66,7 +66,7 @@ absl::StatusOr<bool> IsRelevantAsynchronousStart(const HloInstruction* hlo) {
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
hlo->backend_config<GpuBackendConfig>());
- CollectiveBackendConfig collective_backend_config =
+ const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
return !collective_backend_config.is_sync();
}
@@ -96,7 +96,8 @@ absl::StatusOr<bool> ProcessComputation(
// attribute no_parallel_custom_call to true. When we see a custom-call, clear
// the start ops from the collection and keep their attribute
// no_parallel_custom_call as false.
- const std::vector<HloInstruction*> all_instructions = sequence.instructions();
+ const std::vector<HloInstruction*>& all_instructions =
+ sequence.instructions();
for (HloInstruction* hlo : all_instructions) {
if (MayInvokeCustomCall(hlo, custom_call_in_computation)) {
async_starts.clear();