# include all Ray third party dependencies

# Because we use the old C++ ABI to stay compatible with TensorFlow,
# the same ABI setting has to be applied to the dependencies as well
set(EP_CXX_FLAGS "${EP_CXX_FLAGS} -D_GLIBCXX_USE_CXX11_ABI=0")
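
# Illustrative sketch only (assumption, not part of this file): the
# *ExternalProject modules included below are expected to forward EP_CXX_FLAGS
# into each dependency's build, e.g. roughly:
#   ExternalProject_Add(example_ep                 # hypothetical project name
#     ...
#     CMAKE_ARGS -DCMAKE_CXX_FLAGS=${EP_CXX_FLAGS})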

# The following is needed because on CentOS, the lib directory is named lib64
if(EXISTS "/etc/redhat-release" AND CMAKE_SIZEOF_VOID_P EQUAL 8)
  set(LIB_SUFFIX 64)
endif()
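
# Illustrative sketch only (assumption): LIB_SUFFIX is typically consumed when
# the *ExternalProject modules compute library paths, e.g. roughly:
#   set(GLOG_STATIC_LIB ${GLOG_HOME}/lib${LIB_SUFFIX}/libglog.a)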

if(RAY_BUILD_TESTS OR RAY_BUILD_BENCHMARKS)
  add_custom_target(unittest ctest -L unittest)
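
  # Illustrative sketch only (assumption): `ctest -L unittest` only runs tests
  # that carry the "unittest" label, so test targets elsewhere in the build are
  # expected to be registered roughly like:
  #   add_test(NAME example_test COMMAND example_test)    # hypothetical test
  #   set_tests_properties(example_test PROPERTIES LABELS "unittest")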

  include(GtestExternalProject)
  message(STATUS "GTest include dir: ${GTEST_INCLUDE_DIR}")
  message(STATUS "GTest static library: ${GTEST_STATIC_LIB}")
  message(STATUS "GTest static main library: ${GTEST_MAIN_STATIC_LIB}")
  message(STATUS "GMock static main library: ${GMOCK_MAIN_STATIC_LIB}")
  include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
  ADD_THIRDPARTY_LIB(gtest
    STATIC_LIB ${GTEST_STATIC_LIB})
  ADD_THIRDPARTY_LIB(gtest_main
    STATIC_LIB ${GTEST_MAIN_STATIC_LIB})
  ADD_THIRDPARTY_LIB(gmock_main
    STATIC_LIB ${GMOCK_MAIN_STATIC_LIB})

  add_dependencies(gtest googletest_ep)
  add_dependencies(gtest_main googletest_ep)
  add_dependencies(gmock_main googletest_ep)
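
  # Illustrative sketch only (assumption: ADD_THIRDPARTY_LIB defines imported
  # <name>_static targets, as in Arrow's BuildUtils.cmake). A test executable
  # would then link against GTest/GMock roughly like:
  #   add_executable(example_test example_test.cc)        # hypothetical target
  #   target_link_libraries(example_test gtest_static gmock_main_static)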
endif()

include(GlogExternalProject)
message(STATUS "Glog home: ${GLOG_HOME}")
message(STATUS "Glog include dir: ${GLOG_INCLUDE_DIR}")
message(STATUS "Glog static lib: ${GLOG_STATIC_LIB}")

include_directories(${GLOG_INCLUDE_DIR})
ADD_THIRDPARTY_LIB(glog
  STATIC_LIB ${GLOG_STATIC_LIB})

add_dependencies(glog glog_ep)

# boost
include(BoostExternalProject)

message(STATUS "Boost root: ${BOOST_ROOT}")
message(STATUS "Boost include dir: ${Boost_INCLUDE_DIR}")
message(STATUS "Boost system library: ${Boost_SYSTEM_LIBRARY}")
message(STATUS "Boost filesystem library: ${Boost_FILESYSTEM_LIBRARY}")
include_directories(${Boost_INCLUDE_DIR})

ADD_THIRDPARTY_LIB(boost_system
  STATIC_LIB ${Boost_SYSTEM_LIBRARY})
ADD_THIRDPARTY_LIB(boost_filesystem
  STATIC_LIB ${Boost_FILESYSTEM_LIBRARY})
ADD_THIRDPARTY_LIB(boost_thread
  STATIC_LIB ${Boost_THREAD_LIBRARY})

add_dependencies(boost_system boost_ep)
add_dependencies(boost_filesystem boost_ep)
add_dependencies(boost_thread boost_ep)

add_custom_target(boost DEPENDS boost_system boost_filesystem boost_thread)
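
# Illustrative sketch only (assumption): the umbrella target above lets other
# parts of the build depend on all three Boost libraries at once, e.g. roughly:
#   add_dependencies(example_target boost)        # hypothetical target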

# flatbuffers
include(FlatBuffersExternalProject)

message(STATUS "Flatbuffers home: ${FLATBUFFERS_HOME}")
message(STATUS "Flatbuffers include dir: ${FLATBUFFERS_INCLUDE_DIR}")
message(STATUS "Flatbuffers static library: ${FLATBUFFERS_STATIC_LIB}")
message(STATUS "Flatbuffers compiler: ${FLATBUFFERS_COMPILER}")
include_directories(SYSTEM ${FLATBUFFERS_INCLUDE_DIR})

ADD_THIRDPARTY_LIB(flatbuffers STATIC_LIB ${FLATBUFFERS_STATIC_LIB})

add_dependencies(flatbuffers flatbuffers_ep)
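
# Illustrative sketch only (assumption): ${FLATBUFFERS_COMPILER} is later used
# to generate C++ headers from .fbs schemas, e.g. roughly:
#   add_custom_command(
#     OUTPUT ${EXAMPLE_OUTPUT_DIR}/example_generated.h   # hypothetical paths
#     COMMAND ${FLATBUFFERS_COMPILER} --cpp -o ${EXAMPLE_OUTPUT_DIR} ${EXAMPLE_FBS_DIR}/example.fbs
#     DEPENDS flatbuffers_ep)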

# Apache Arrow; uses FLATBUFFERS_HOME and BOOST_ROOT set up above
include(ArrowExternalProject)

message(STATUS "Arrow home: ${ARROW_HOME}")
message(STATUS "Arrow source dir: ${ARROW_SOURCE_DIR}")
message(STATUS "Arrow include dir: ${ARROW_INCLUDE_DIR}")
message(STATUS "Arrow static library: ${ARROW_STATIC_LIB}")
message(STATUS "Arrow shared library: ${ARROW_SHARED_LIB}")
include_directories(SYSTEM ${ARROW_INCLUDE_DIR})

ADD_THIRDPARTY_LIB(arrow STATIC_LIB ${ARROW_STATIC_LIB})

add_dependencies(arrow arrow_ep)
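
# Illustrative sketch only (assumption): ArrowExternalProject.cmake is expected
# to pass the locations configured above into Arrow's own CMake, e.g. roughly:
#   ExternalProject_Add(arrow_ep
#     ...
#     CMAKE_ARGS -DFLATBUFFERS_HOME=${FLATBUFFERS_HOME} -DBOOST_ROOT=${BOOST_ROOT})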

# Plasma is already built as part of Arrow
message(STATUS "Plasma include dir: ${PLASMA_INCLUDE_DIR}")
message(STATUS "Plasma static library: ${PLASMA_STATIC_LIB}")
message(STATUS "Plasma shared library: ${PLASMA_SHARED_LIB}")
include_directories(SYSTEM ${PLASMA_INCLUDE_DIR})

ADD_THIRDPARTY_LIB(plasma STATIC_LIB ${PLASMA_STATIC_LIB})

add_dependencies(plasma plasma_ep)
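
# Note (assumption): the PLASMA_* paths and the plasma_ep target are expected
# to be defined by ArrowExternalProject.cmake, since Plasma ships as part of
# the Arrow build.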

if ("${CMAKE_RAY_LANG_PYTHON}" STREQUAL "YES")
  # Clean the arrow_ep/python/build/lib.xxxxx directory; otherwise, building
  # with another Python version leaves multiple lib.xxxx directories behind.
  set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${ARROW_SOURCE_DIR}/python/build/")
  set_property(DIRECTORY APPEND PROPERTY ADDITIONAL_MAKE_CLEAN_FILES "${CMAKE_SOURCE_DIR}/python/ray/pyarrow_files/pyarrow")

  # Here we use ExternalProject to drive the pyarrow build;
  # add_custom_command has problems with setup.py.
  if(EXISTS ${ARROW_SOURCE_DIR}/python/build/)
    # If `make clean` was not run, skip rebuilding pyarrow.
    add_custom_target(pyarrow_ext)
  else()
    # pyarrow
    find_package(PythonInterp REQUIRED)
    message(STATUS "PYTHON_EXECUTABLE for pyarrow: ${PYTHON_EXECUTABLE}")

    # PYARROW_PARALLEL is left empty so that -j is added to the pyarrow build.
    set(pyarrow_ENV
      "SETUPTOOLS_SCM_PRETEND_VERSION=0.12.0-RAY"
      "PKG_CONFIG_PATH=${ARROW_LIBRARY_DIR}/pkgconfig"
      "PYARROW_WITH_PLASMA=1"
      "PYARROW_WITH_TENSORFLOW=1"
      "PYARROW_BUNDLE_ARROW_CPP=1"
      "PARQUET_HOME=${PARQUET_HOME}"
      "BOOST_ROOT=${BOOST_ROOT}"
      "PYARROW_WITH_PARQUET=1"
      "PYARROW_PARALLEL=")

    if (APPLE)
      # Since 10.14, the Xcode toolchain only accepts libc++ as the
      # standard library. We set it only on 10.14 and newer, because on some
      # configurations of older macOS, we get the following error
      # with libc++:
      # [...]/usr/bin/c++ '-stdlib=libc++' -isysroot [...] -mmacosx-version-min=10.6 [...]
      # clang: error: invalid deployment target for -stdlib=libc++ (requires OS X 10.7 or later)

      exec_program(uname ARGS -v OUTPUT_VARIABLE DARWIN_VERSION)
      string(REGEX MATCH "[0-9]+" DARWIN_VERSION ${DARWIN_VERSION})
      message(STATUS "Darwin version = ${DARWIN_VERSION}")
      if (DARWIN_VERSION GREATER 17)
        set(pyarrow_ENV ${pyarrow_ENV} "CXXFLAGS='-stdlib=libc++'")
      endif()
    endif()

    ExternalProject_Add(pyarrow_ext
      PREFIX external/pyarrow
      DEPENDS arrow_ep
      DOWNLOAD_COMMAND ""
      BUILD_IN_SOURCE 1
      CONFIGURE_COMMAND cd ${ARROW_SOURCE_DIR}/python && ${CMAKE_COMMAND} -E env ${pyarrow_ENV} ${PYTHON_EXECUTABLE} setup.py build
      BUILD_COMMAND cd ${ARROW_SOURCE_DIR}/python && ${CMAKE_COMMAND} -E env ${pyarrow_ENV} ${PYTHON_EXECUTABLE} setup.py build_ext
      INSTALL_COMMAND bash -c "cp -rf \$(find ${ARROW_SOURCE_DIR}/python/build/ -maxdepth 1 -type d -print | grep -m1 'lib')/pyarrow ${CMAKE_SOURCE_DIR}/python/ray/pyarrow_files/")
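
    # The INSTALL_COMMAND above copies the pyarrow package out of the first
    # build/lib.* directory into python/ray/pyarrow_files/, presumably so the
    # Ray Python package can bundle its own pyarrow build.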
  endif()
endif ()