
Commit

Enable bazel build --nobuild to prevent network flakes for TensorFlow builds

Removes the usage of TensorFlow's `py_cpp_test_filters` config, which is incompatible with `bazel build --nobuild`, and instead replicates the effect of the config by specifying bazel options explicitly.

PiperOrigin-RevId: 726582864
ddunl authored and Google-ML-Automation committed Feb 13, 2025
1 parent ba741cd commit c3b59c3
Showing 2 changed files with 29 additions and 10 deletions.
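
To summarize the approach described in the commit message, the sketch below shows the two-phase pattern this change enables: a retried `bazel build --nobuild` pass fetches external repositories up front, so transient network flakes hit a cheap, retryable step instead of failing the main `bazel test` invocation. This is a minimal illustration, not the actual build.py code; the helper name `prefetch_then_test` and its parameters are assumptions, while the retry-wrapper flags mirror the generated commands in golden_commands.txt below.

# Illustrative sketch only; prefetch_then_test, bazel_options, and targets are
# hypothetical names, not identifiers from build.py.
from typing import List


def prefetch_then_test(
    bazel_options: List[str], targets: List[str]
) -> List[List[str]]:
  # GNU parallel serves purely as a retry wrapper (--nonall runs the command
  # once; --retries/--delay retry it on failure), as in golden_commands.txt.
  retry = ["parallel", "--ungroup", "--retries", "3", "--delay", "15", "--nonall", "--"]
  # Phase 1: fetch and analyze everything without executing build actions.
  fetch = retry + ["bazel", "build", *bazel_options, "--nobuild", "--", *targets]
  # Phase 2: the real test run, now hitting already-fetched external repos.
  test = ["bazel", "test", *bazel_options, "--", *targets]
  return [fetch, test]
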
33 changes: 25 additions & 8 deletions build_tools/ci/build.py
@@ -206,11 +206,8 @@ def commands(self) -> List[List[str]]:
# problems in practice.

# TODO(ddunleavy): Remove the condition here. Need to get parallel on the
-# MacOS VM, and slightly change TF config (likely by specifying tag_filters
-# manually).
+# MacOS VM.
if self.type_ not in (
-BuildType.TENSORFLOW_CPU_SELF_HOSTED,
-BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED,
BuildType.MACOS_CPU_X86,
):
cmds.append(
@@ -404,14 +401,31 @@ def nvidia_gpu_build_with_compute_capability(
),
)

+tensorflow_tag_filters = (
+"-no_oss",
+"-tf_tosa",
+"-oss_excluded",
+"-oss_serial",
+"-tpu",
+"-benchmark-test",
+"-v1only",
+)
+
+tensorflow_cpu_tag_filters = tensorflow_tag_filters + ("-gpu",)
+tensorflow_gpu_tag_filters = tensorflow_tag_filters + (
+"-no_gpu",
+"-no_gpu_presubmit",
+"-no_cuda11",
+"+gpu",
+)

_TENSORFLOW_CPU_SELF_HOSTED_BUILD = Build(
type_=BuildType.TENSORFLOW_CPU_SELF_HOSTED,
repo="tensorflow/tensorflow",
image_url=None,
configs=(
"release_cpu_linux",
"rbe_linux_cpu",
"linux_cpu_pycpp_test_filters",
),
target_patterns=(
"//tensorflow/compiler/...",
@@ -420,11 +434,14 @@ def nvidia_gpu_build_with_compute_capability(
"-//tensorflow/python/distribute/...",
"-//tensorflow/python/compiler/tensorrt/...",
),
+build_tag_filters=tensorflow_cpu_tag_filters,
+test_tag_filters=tensorflow_cpu_tag_filters,
options=dict(
verbose_failures=True,
test_output="errors",
override_repository=f"xla={_GITHUB_WORKSPACE}/openxla/xla",
profile="profile.json.gz",
+test_lang_filters="cc,py",
),
)

@@ -435,7 +452,6 @@ def nvidia_gpu_build_with_compute_capability(
configs=(
"release_gpu_linux",
"rbe_linux_cuda",
"linux_cuda_pycpp_test_filters",
),
target_patterns=(
"//tensorflow/compiler/...",
@@ -444,13 +460,14 @@ def nvidia_gpu_build_with_compute_capability(
"-//tensorflow/python/distribute/...",
"-//tensorflow/python/compiler/tensorrt/...",
),
-build_tag_filters=("-no_oss", "+gpu"),
-test_tag_filters=("-no_oss", "+gpu"),
+build_tag_filters=tensorflow_gpu_tag_filters,
+test_tag_filters=tensorflow_gpu_tag_filters,
options=dict(
verbose_failures=True,
test_output="errors",
override_repository=f"xla={_GITHUB_WORKSPACE}/openxla/xla",
profile="profile.json.gz",
+test_lang_filters="cc,py",
),
)
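
As a reading aid for the diff above, the hypothetical helper below shows how the new tag-filter tuples translate into the explicit Bazel options that replace the dropped linux_cpu_pycpp_test_filters / linux_cuda_pycpp_test_filters configs. The function name tag_filter_options is an assumption for illustration; in build.py these values flow through the Build fields and options dict shown above.

# Sketch only: not a helper that exists in build.py.
from typing import List, Tuple


def tag_filter_options(tag_filters: Tuple[str, ...]) -> List[str]:
  joined = ",".join(tag_filters)  # e.g. "-no_oss,-tf_tosa,...,-gpu" for CPU
  return [
      f"--build_tag_filters={joined}",
      f"--test_tag_filters={joined}",
      "--test_lang_filters=cc,py",
  ]

# tag_filter_options(tensorflow_cpu_tag_filters) reproduces the filter flags
# visible in the TENSORFLOW_CPU_SELF_HOSTED commands in golden_commands.txt.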

6 changes: 4 additions & 2 deletions build_tools/ci/golden_commands.txt
@@ -34,10 +34,12 @@ bazel test --build_tag_filters=-no_oss,-gpu,-no_mac,-mac_excluded,-requires-gpu-
bazel analyze-profile profile.json.gz
# END BuildType.MACOS_CPU_X86
# BEGIN BuildType.TENSORFLOW_CPU_SELF_HOSTED
-bazel test --build_tag_filters= --test_tag_filters= --config=release_cpu_linux --config=rbe_linux_cpu --config=linux_cpu_pycpp_test_filters --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
+parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-gpu --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-gpu --config=release_cpu_linux --config=rbe_linux_cpu --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz --test_lang_filters=cc,py --nobuild -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
+bazel test --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-gpu --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-gpu --config=release_cpu_linux --config=rbe_linux_cpu --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz --test_lang_filters=cc,py -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
bazel analyze-profile profile.json.gz
# END BuildType.TENSORFLOW_CPU_SELF_HOSTED
# BEGIN BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED
-bazel test --build_tag_filters=-no_oss,+gpu --test_tag_filters=-no_oss,+gpu --config=release_gpu_linux --config=rbe_linux_cuda --config=linux_cuda_pycpp_test_filters --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
+parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-no_gpu,-no_gpu_presubmit,-no_cuda11,+gpu --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-no_gpu,-no_gpu_presubmit,-no_cuda11,+gpu --config=release_gpu_linux --config=rbe_linux_cuda --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz --test_lang_filters=cc,py --nobuild -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
+bazel test --build_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-no_gpu,-no_gpu_presubmit,-no_cuda11,+gpu --test_tag_filters=-no_oss,-tf_tosa,-oss_excluded,-oss_serial,-tpu,-benchmark-test,-v1only,-no_gpu,-no_gpu_presubmit,-no_cuda11,+gpu --config=release_gpu_linux --config=rbe_linux_cuda --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz --test_lang_filters=cc,py -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
bazel analyze-profile profile.json.gz
# END BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED
