Skip to content

Commit

Permalink
Delete Kokoro GPU builds now that GitHub Actions GPU builds are blocking
Browse files Browse the repository at this point in the history
PiperOrigin-RevId: 726535011
  • Loading branch information
ddunl authored and Google-ML-Automation committed Feb 13, 2025
1 parent 632cb10 commit 1b07f67
Show file tree
Hide file tree
Showing 4 changed files with 0 additions and 99 deletions.
22 changes: 0 additions & 22 deletions .kokoro/jax/build.sh

This file was deleted.

23 changes: 0 additions & 23 deletions .kokoro/linux/build.sh

This file was deleted.

34 changes: 0 additions & 34 deletions build_tools/ci/build.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,14 +90,11 @@ class BuildType(enum.Enum):
"""Enum representing all types of builds."""
CPU_X86_SELF_HOSTED = enum.auto()
CPU_ARM64_SELF_HOSTED = enum.auto()
GPU = enum.auto()
GPU_T4_SELF_HOSTED = enum.auto()
GPU_CONTINUOUS = enum.auto()

MACOS_CPU_X86 = enum.auto()

JAX_CPU_SELF_HOSTED = enum.auto()
JAX_GPU = enum.auto()
JAX_X86_GPU_T4_SELF_HOSTED = enum.auto()

TENSORFLOW_CPU_SELF_HOSTED = enum.auto()
Expand Down Expand Up @@ -319,12 +316,6 @@ def nvidia_gpu_build_with_compute_capability(
build_tag_filters=cpu_arm_tag_filter,
test_tag_filters=cpu_arm_tag_filter,
)
_GPU_BUILD = nvidia_gpu_build_with_compute_capability(
type_=BuildType.GPU,
image_url=_ML_BUILD_IMAGE,
configs=("warnings", "rbe_linux_cuda_nvcc"),
compute_capability=75,
)

_GPU_T4_SELF_HOSTED_BUILD = nvidia_gpu_build_with_compute_capability(
type_=BuildType.GPU_T4_SELF_HOSTED,
Expand Down Expand Up @@ -393,28 +384,6 @@ def nvidia_gpu_build_with_compute_capability(
),
)

_JAX_GPU_BUILD = Build(
type_=BuildType.JAX_GPU,
repo="google/jax",
image_url=_DEFAULT_IMAGE,
configs=(
"rbe_linux_x86_64_cuda",
),
target_patterns=("//tests:gpu_tests", "//tests:backend_independent_tests"),
build_tag_filters=("-multiaccelerator",),
test_tag_filters=("-multiaccelerator",),
test_env=dict(
JAX_SKIP_SLOW_TESTS=1,
TF_CPP_MIN_LOG_LEVEL=0,
JAX_EXCLUDE_TEST_TARGETS="PmapTest.testSizeOverflow",
),
options=dict(
**_DEFAULT_BAZEL_OPTIONS,
override_repository="xla=/github/xla",
repo_env="HERMETIC_PYTHON_VERSION=3.10",
),
)

_JAX_GPU_SELF_HOSTED_BUILD = Build(
type_=BuildType.JAX_X86_GPU_T4_SELF_HOSTED,
repo="google/jax",
Expand Down Expand Up @@ -486,10 +455,7 @@ def nvidia_gpu_build_with_compute_capability(
)

_KOKORO_JOB_NAME_TO_BUILD_MAP = {
"tensorflow/xla/linux/gpu/build_gpu": _GPU_BUILD,
"tensorflow/xla/linux/github_continuous/build_gpu": _GPU_BUILD,
"tensorflow/xla/macos/github_continuous/cpu_py39_full": _MACOS_X86_BUILD,
"tensorflow/xla/jax/gpu/build_gpu": _JAX_GPU_BUILD,
"xla-linux-x86-cpu": _CPU_X86_SELF_HOSTED_BUILD,
"xla-linux-arm64-cpu": _CPU_ARM64_SELF_HOSTED_BUILD,
"xla-linux-x86-gpu-t4": _GPU_T4_SELF_HOSTED_BUILD,
Expand Down
20 changes: 0 additions & 20 deletions build_tools/ci/golden_commands.txt
Original file line number Diff line number Diff line change
Expand Up @@ -8,16 +8,6 @@ parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_fi
bazel test --build_tag_filters=-no_oss,-gpu,-requires-gpu-nvidia,-requires-gpu-amd --test_tag_filters=-no_oss,-gpu,-requires-gpu-nvidia,-requires-gpu-amd --config=warnings --config=nonccl --config=rbe_linux_cpu --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async -- //xla/... //build_tools/... @tsl//tsl/...
bazel analyze-profile profile.json.gz
# END BuildType.CPU_X86_SELF_HOSTED
# BEGIN BuildType.GPU
$KOKORO_ARTIFACTS_DIR/github/xla/.kokoro/generate_index_html.sh index.html
nvidia-smi
parallel --ungroup --retries 3 --delay 15 --nonall -- docker pull us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest
docker run --detach --name=xla_ci --rm --interactive --tty --volume=./github:/github --workdir=/github/xla us-central1-docker.pkg.dev/tensorflow-sigs/tensorflow/ml-build:latest bash
docker exec xla_ci parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//build_tools/ci:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --nobuild -- //xla/... //build_tools/... @tsl//tsl/...
docker exec xla_ci bazel test --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//build_tools/ci:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async -- //xla/... //build_tools/... @tsl//tsl/...
docker exec xla_ci bazel analyze-profile profile.json.gz
docker stop xla_ci
# END BuildType.GPU
# BEGIN BuildType.GPU_T4_SELF_HOSTED
nvidia-smi
parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only --test_tag_filters=-no_oss,requires-gpu-nvidia,gpu,-rocm-only,requires-gpu-sm75-only,requires-gpu-sm60,requires-gpu-sm70,-requires-gpu-sm80,-requires-gpu-sm80-only,-requires-gpu-sm90,-requires-gpu-sm90-only,-requires-gpu-sm100,-requires-gpu-sm100-only,-requires-gpu-amd --config=warnings --config=rbe_linux_cuda_nvcc --run_under=//build_tools/ci:parallel_gpu_execute --repo_env=TF_CUDA_COMPUTE_CAPABILITIES=7.5 --@cuda_driver//:enable_forward_compatibility=true --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --nobuild -- //xla/... //build_tools/... @tsl//tsl/...
Expand All @@ -29,16 +19,6 @@ parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_fi
bazel test --build_tag_filters= --test_tag_filters= --config=rbe_linux_x86_64 --test_env=JAX_NUM_GENERATED_CASES=25 --test_env=JAX_SKIP_SLOW_TESTS=1 --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --repo_env=HERMETIC_PYTHON_VERSION=3.12 -- //tests:cpu_tests //tests:backend_independent_tests
bazel analyze-profile profile.json.gz
# END BuildType.JAX_CPU_SELF_HOSTED
# BEGIN BuildType.JAX_GPU
$KOKORO_ARTIFACTS_DIR/github/xla/.kokoro/generate_index_html.sh index.html
git clone --depth=1 https://github.com/google/jax ./github/jax
parallel --ungroup --retries 3 --delay 15 --nonall -- docker pull gcr.io/tensorflow-sigs/build:latest-python3.11
docker run --detach --name=xla_ci --rm --interactive --tty --volume=./github:/github --workdir=/github/jax gcr.io/tensorflow-sigs/build:latest-python3.11 bash
docker exec xla_ci parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-multiaccelerator --test_tag_filters=-multiaccelerator --config=rbe_linux_x86_64_cuda --test_env=JAX_SKIP_SLOW_TESTS=1 --test_env=TF_CPP_MIN_LOG_LEVEL=0 --test_env=JAX_EXCLUDE_TEST_TARGETS=PmapTest.testSizeOverflow --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --override_repository=xla=/github/xla --repo_env=HERMETIC_PYTHON_VERSION=3.10 --nobuild -- //tests:gpu_tests //tests:backend_independent_tests
docker exec xla_ci bazel test --build_tag_filters=-multiaccelerator --test_tag_filters=-multiaccelerator --config=rbe_linux_x86_64_cuda --test_env=JAX_SKIP_SLOW_TESTS=1 --test_env=TF_CPP_MIN_LOG_LEVEL=0 --test_env=JAX_EXCLUDE_TEST_TARGETS=PmapTest.testSizeOverflow --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --override_repository=xla=/github/xla --repo_env=HERMETIC_PYTHON_VERSION=3.10 -- //tests:gpu_tests //tests:backend_independent_tests
docker exec xla_ci bazel analyze-profile profile.json.gz
docker stop xla_ci
# END BuildType.JAX_GPU
# BEGIN BuildType.JAX_X86_GPU_T4_SELF_HOSTED
parallel --ungroup --retries 3 --delay 15 --nonall -- bazel build --build_tag_filters=-multiaccelerator --test_tag_filters=-multiaccelerator --config=rbe_linux_x86_64_cuda --test_env=JAX_SKIP_SLOW_TESTS=1 --test_env=TF_CPP_MIN_LOG_LEVEL=0 --test_env=JAX_EXCLUDE_TEST_TARGETS=PmapTest.testSizeOverflow --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --repo_env=HERMETIC_PYTHON_VERSION=3.10 --nobuild -- //tests:gpu_tests //tests:backend_independent_tests
bazel test --build_tag_filters=-multiaccelerator --test_tag_filters=-multiaccelerator --config=rbe_linux_x86_64_cuda --test_env=JAX_SKIP_SLOW_TESTS=1 --test_env=TF_CPP_MIN_LOG_LEVEL=0 --test_env=JAX_EXCLUDE_TEST_TARGETS=PmapTest.testSizeOverflow --test_output=errors --verbose_failures --keep_going --nobuild_tests_only --profile=profile.json.gz --flaky_test_attempts=3 --jobs=150 --bes_upload_mode=fully_async --override_repository=xla=$GITHUB_WORKSPACE/openxla/xla --repo_env=HERMETIC_PYTHON_VERSION=3.10 -- //tests:gpu_tests //tests:backend_independent_tests
Expand Down

0 comments on commit 1b07f67

Please sign in to comment.