
[DO_NOT_MERGE] Revert "TensorFlow June pin update & PyTorch/XLA c++17 migration (fol… #3968

Closed · wants to merge 3 commits
3 changes: 1 addition & 2 deletions build_torch_xla_libs.sh
@@ -54,10 +54,9 @@ if [[ ! -z "$BAZEL_JOBS" ]]; then
   MAX_JOBS="--jobs=$BAZEL_JOBS"
 fi
 
-OPTS+=(--cxxopt="-std=c++17")
+OPTS+=(--cxxopt="-std=c++14")
 if [[ $(basename -- $CC) =~ ^clang ]]; then
   OPTS+=(--cxxopt="-Wno-c++11-narrowing")
-  OPTS+=(--cxxopt="-Wno-c++14-narrowing")
 fi
 
 if [[ "$XLA_CUDA" == "1" ]]; then
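
The same flag flip recurs in setup.py and test/cpp/CMakeLists.txt below, so all three build surfaces agree on the standard. As a quick sanity check, here is a minimal probe (a sketch, not part of this PR) that reports which standard the toolchain actually compiled under; __cplusplus expands to 201402L for -std=c++14 and 201703L for -std=c++17:

    // standard_probe.cc: print the C++ standard this translation unit was built with.
    // Build with the same -std flag the scripts in this PR set, e.g.:
    //   g++ -std=c++14 standard_probe.cc && ./a.out
    #include <cstdio>

    int main() {
    #if __cplusplus >= 201703L
      std::puts("compiled as C++17 or newer");
    #elif __cplusplus >= 201402L
      std::puts("compiled as C++14");
    #else
      std::puts("compiled as pre-C++14");
    #endif
      return 0;
    }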
2 changes: 1 addition & 1 deletion docker/Dockerfile
@@ -44,7 +44,7 @@ RUN if [ "${git_clone}" = "true" ]; then github_branch="${release_version}" && \
     if [ "${release_version}" = "nightly" ]; then github_branch="master"; fi && \
     cd /pytorch && \
     rm -rf xla && \
-    git clone -b "${github_branch}" --recursive https://github.com/pytorch/xla && \
+    git clone -b revert_to_old_tf --recursive https://github.com/pytorch/xla && \
     cd / && \
     git clone -b "${github_branch}" --recursive https://github.com/pytorch-tpu/examples tpu-examples; fi
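
Pointing the clone at the hard-coded revert_to_old_tf branch instead of "${github_branch}" is presumably what earns the DO_NOT_MERGE tag in the title: as written, every image build would pull the revert branch regardless of ${release_version}.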
2 changes: 1 addition & 1 deletion setup.py
@@ -303,7 +303,7 @@ def make_relative_rpath(path):
 
 
 extra_compile_args = [
-    '-std=c++17',
+    '-std=c++14',
     '-Wno-sign-compare',
    '-Wno-deprecated-declarations',
    '-Wno-return-type',
2 changes: 0 additions & 2 deletions test/cpp/CMakeLists.txt
@@ -43,8 +43,6 @@ ExternalProject_Add(
   CMAKE_ARGS
   "-DCMAKE_CXX_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=${PT_CXX_ABI}")
 
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17")
-
 ExternalProject_Get_Property(googletest SOURCE_DIR)
 
 set(TORCH_XLA_TEST_SOURCES
42 changes: 0 additions & 42 deletions test/pjrt/test_experimental_pjrt_multi_cpu.py

This file was deleted.
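
Judging by its name and by the CPU_NUM_DEVICES changes elsewhere in this diff, this test exercised PjRt with multiple CPU devices, a capability that arrived with the TFRT CPU client; once the revert drops that client, the test has nothing left to cover.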

3 changes: 1 addition & 2 deletions test/run_tests.sh
@@ -77,8 +77,7 @@ function run_xla_backend_mp {
 
 function run_pjrt {
   echo "Running in PjRt runtime: $@"
-  # TODO(darisoy): run these tests with multiple CPU devices, this fails due to TF issue.
-  PJRT_DEVICE=CPU CPU_NUM_DEVICES=1 run_test "$@"
+  PJRT_DEVICE=CPU run_test "$@"
 }
 
 function run_async_scalar {
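
CPU_NUM_DEVICES only matters to the TFRT CPU client, which reads it as its device count (see pjrt_computation_client.cc below). With the revert back to the single-device GetCpuClient path, the variable is inert, so the invocation drops it along with the stale TODO.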
2 changes: 1 addition & 1 deletion third_party/tensorflow
Submodule tensorflow updated 4235 files
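
This bump is the "TensorFlow June pin update" half of the revert: the 4235 touched files all live inside the third_party/tensorflow submodule, rolling the pin back to a pre-June revision that still ships the old, non-TFRT PjRt CPU client.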
1 change: 0 additions & 1 deletion third_party/xla_client/BUILD
@@ -150,7 +150,6 @@ cc_library(
         "//tensorflow/compiler/xla/pjrt:cpu_device",
         "//tensorflow/compiler/xla/pjrt:tpu_client",
         "//tensorflow/compiler/xla/pjrt:pjrt_client",
-        "//tensorflow/compiler/xla/pjrt:tfrt_cpu_pjrt_client",
         "//tensorflow/compiler/xla/rpc:grpc_stub",
         "//tensorflow/compiler/xla/service:cpu_plugin",
         "//tensorflow/compiler/xla/service:platform_util",
1 change: 0 additions & 1 deletion third_party/xla_client/env_vars.cc
@@ -20,7 +20,6 @@ const char* const kEnvTpuvmMode = "TPUVM_MODE";
 const char* const kEnvPjRtDevice = "PJRT_DEVICE";
 const char* const kEnvPjRtTpuMaxInflightComputations =
     "PJRT_TPU_MAX_INFLIGHT_COMPUTATIONS";
-const char* const kEnvPjrtAsyncCpuClient = "PJRT_CPU_ASYNC_CLIENT";
 
 }  // namespace env
 }  // namespace xla
1 change: 0 additions & 1 deletion third_party/xla_client/env_vars.h
@@ -20,7 +20,6 @@ extern const char* const kEnvStartService;
 extern const char* const kEnvTpuvmMode;
 extern const char* const kEnvPjRtDevice;
 extern const char* const kEnvPjRtTpuMaxInflightComputations;
-extern const char* const kEnvPjrtAsyncCpuClient;
 
 }  // namespace env
 }  // namespace xla
6 changes: 1 addition & 5 deletions third_party/xla_client/pjrt_computation_client.cc
@@ -9,7 +9,6 @@
 #include "tensorflow/compiler/xla/literal.h"
 #include "tensorflow/compiler/xla/pjrt/cpu_device.h"
 #include "tensorflow/compiler/xla/pjrt/pjrt_client.h"
-#include "tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h"
 #include "tensorflow/compiler/xla/pjrt/tpu_client.h"
 #include "tensorflow/compiler/xla/shape.h"
 #include "tensorflow/compiler/xla/xla_client/computation_client.h"
@@ -47,10 +46,7 @@ PjRtComputationClient::PjRtComputationClient() {
   std::string device_type = sys_util::GetEnvString(env::kEnvPjRtDevice, "");
   if (device_type == "CPU") {
     TF_VLOG(1) << "Initializing PjRt CPU client...";
-    bool async = sys_util::GetEnvBool(env::kEnvPjrtAsyncCpuClient, true);
-    int cpu_device_count = sys_util::GetEnvInt(env::kEnvNumCpu, 1);
-    client_ =
-        std::move(xla::GetTfrtCpuClient(async, cpu_device_count).ValueOrDie());
+    client_ = std::move(xla::GetCpuClient(/*asynchronous=*/false).ValueOrDie());
   } else if (device_type == "TPU") {
     TF_VLOG(1) << "Initializing PjRt TPU client...";
     int64_t max_inflight_computations = sys_util::GetEnvInt(
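
For contrast, here is a hedged side-by-side sketch of the two construction paths this hunk toggles between. The helper MakeCpuClient is hypothetical, and the signatures are assumptions based on the 2022-era headers this file includes (cpu_device.h for xla::GetCpuClient, tfrt_cpu_pjrt_client.h for xla::GetTfrtCpuClient); the pinned submodule is the authority, and the TFRT header only exists under the newer pin this PR reverts away from:

    // Sketch only: hypothetical helper contrasting the old and new CPU clients.
    #include <memory>
    #include <utility>

    #include "tensorflow/compiler/xla/pjrt/cpu_device.h"            // xla::GetCpuClient (assumed)
    #include "tensorflow/compiler/xla/pjrt/pjrt_client.h"           // xla::PjRtClient
    #include "tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h"  // xla::GetTfrtCpuClient (assumed)

    std::unique_ptr<xla::PjRtClient> MakeCpuClient(bool use_tfrt) {
      if (use_tfrt) {
        // Path removed by this revert: asynchronous TFRT client that can expose
        // several CPU devices (the count came from CPU_NUM_DEVICES above).
        return std::move(
            xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/4)
                .ValueOrDie());
      }
      // Path restored by this revert: synchronous, single-device CPU client.
      return std::move(xla::GetCpuClient(/*asynchronous=*/false).ValueOrDie());
    }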
1 change: 0 additions & 1 deletion torch_xla/core/xla_env_vars.py
@@ -26,4 +26,3 @@
 TPU_PROCESS_ADDRESSES = 'TPU_PROCESS_ADDRESSES'
 TPU_VISIBLE_CHIPS = 'TPU_VISIBLE_CHIPS'
 TPU_PROCESS_PORT = 'TPU_PROCESS_PORT'
-PJRT_CPU_ASYNC_CLIENT = 'PJRT_CPU_ASYNC_CLIENT'