Skip to content

Commit

Permalink
Delete TensorFlow GPU Kokoro build now that GitHub Actions build runs continuously
Browse files Browse the repository at this point in the history

Also don't `bazel build --nobuild` for the new TensorFlow GHA GPU build.

PiperOrigin-RevId: 725764079
  • Loading branch information
ddunl authored and Google-ML-Automation committed Feb 11, 2025
1 parent 97fbd35 commit 41720e9
Show file tree
Hide file tree
Showing 2 changed files with 1 addition and 39 deletions.
30 changes: 1 addition & 29 deletions build_tools/ci/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,6 @@ class BuildType(enum.Enum):
JAX_X86_GPU_T4_SELF_HOSTED = enum.auto()

TENSORFLOW_CPU_SELF_HOSTED = enum.auto()
TENSORFLOW_GPU = enum.auto()
TENSORFLOW_X86_GPU_T4_SELF_HOSTED = enum.auto()


Expand Down Expand Up @@ -214,7 +213,7 @@ def commands(self) -> List[List[str]]:
# manually).
if self.type_ not in (
BuildType.TENSORFLOW_CPU_SELF_HOSTED,
BuildType.TENSORFLOW_GPU,
BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED,
BuildType.MACOS_CPU_X86,
):
cmds.append(
Expand Down Expand Up @@ -460,32 +459,6 @@ def nvidia_gpu_build_with_compute_capability(
),
)

_TENSORFLOW_GPU_BUILD = Build(
type_=BuildType.TENSORFLOW_GPU,
repo="tensorflow/tensorflow",
image_url=_ML_BUILD_IMAGE,
configs=(
"release_gpu_linux",
"rbe_linux_cuda",
"linux_cuda_pycpp_test_filters",
),
target_patterns=(
"//tensorflow/compiler/...",
"-//tensorflow/compiler/tf2tensorrt/...",
"//tensorflow/python/...",
"-//tensorflow/python/distribute/...",
"-//tensorflow/python/compiler/tensorrt/...",
),
build_tag_filters=("-no_oss", "+gpu"),
test_tag_filters=("-no_oss", "+gpu"),
options=dict(
verbose_failures=True,
test_output="errors",
override_repository="xla=/github/xla",
profile="profile.json.gz",
),
)

_TENSORFLOW_GPU_SELF_HOSTED_BUILD = Build(
type_=BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED,
repo="tensorflow/tensorflow",
Expand Down Expand Up @@ -517,7 +490,6 @@ def nvidia_gpu_build_with_compute_capability(
"tensorflow/xla/linux/github_continuous/build_gpu": _GPU_BUILD,
"tensorflow/xla/macos/github_continuous/cpu_py39_full": _MACOS_X86_BUILD,
"tensorflow/xla/jax/gpu/build_gpu": _JAX_GPU_BUILD,
"tensorflow/xla/tensorflow/gpu/build_gpu": _TENSORFLOW_GPU_BUILD,
"xla-linux-x86-cpu": _CPU_X86_SELF_HOSTED_BUILD,
"xla-linux-arm64-cpu": _CPU_ARM64_SELF_HOSTED_BUILD,
"xla-linux-x86-gpu-t4": _GPU_T4_SELF_HOSTED_BUILD,
Expand Down
10 changes: 0 additions & 10 deletions build_tools/ci/golden_commands.txt
Original file line number Diff line number Diff line change
Expand Up @@ -57,17 +57,7 @@ bazel analyze-profile profile.json.gz
bazel test --build_tag_filters= --test_tag_filters= --config=release_cpu_linux --config=rbe_linux_cpu --config=linux_cpu_pycpp_test_filters --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
bazel analyze-profile profile.json.gz
# END BuildType.TENSORFLOW_CPU_SELF_HOSTED
# BEGIN BuildType.TENSORFLOW_GPU
$KOKORO_ARTIFACTS_DIR/github/xla/.kokoro/generate_index_html.sh index.html
git clone --depth=1 https://github.com/tensorflow/tensorflow ./github/tensorflow
parallel --ungroup --retries 3 --delay 15 --nonall -- docker pull us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest
docker run --detach --name=xla_ci --rm --interactive --tty --volume=./github:/github --workdir=/github/tensorflow us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest bash
docker exec xla_ci bazel test --build_tag_filters=-no_oss,+gpu --test_tag_filters=-no_oss,+gpu --config=release_gpu_linux --config=rbe_linux_cuda --config=linux_cuda_pycpp_test_filters --verbose_failures --test_output=errors --override_repository=xla=/github/xla --profile=profile.json.gz -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
docker exec xla_ci bazel analyze-profile profile.json.gz
docker stop xla_ci
# END BuildType.TENSORFLOW_GPU
# BEGIN BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED
parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-no_oss,+gpu --test_tag_filters=-no_oss,+gpu --config=release_gpu_linux --config=rbe_linux_cuda --config=linux_cuda_pycpp_test_filters --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz --nobuild -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
bazel test --build_tag_filters=-no_oss,+gpu --test_tag_filters=-no_oss,+gpu --config=release_gpu_linux --config=rbe_linux_cuda --config=linux_cuda_pycpp_test_filters --verbose_failures --test_output=errors --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --profile=profile.json.gz -- //tensorflow/compiler/... -//tensorflow/compiler/tf2tensorrt/... //tensorflow/python/... -//tensorflow/python/distribute/... -//tensorflow/python/compiler/tensorrt/...
bazel analyze-profile profile.json.gz
# END BuildType.TENSORFLOW_X86_GPU_T4_SELF_HOSTED

0 comments on commit 41720e9

Please sign in to comment.