author     Alfredo Tupone <tupone@gentoo.org>  2024-03-08 19:40:10 +0100
committer  Alfredo Tupone <tupone@gentoo.org>  2024-03-08 19:45:29 +0100
commit     087c7302f2f874abf503fad2c0eaf04fde04cb40 (patch)
tree       f46ec93eadebe89b64d97d980a265a856e89d32e
parent     media-libs/openexr: fix x86 musl build for version 3.1.7 (diff)
download   gentoo-087c7302f2f874abf503fad2c0eaf04fde04cb40.tar.gz
           gentoo-087c7302f2f874abf503fad2c0eaf04fde04cb40.tar.bz2
           gentoo-087c7302f2f874abf503fad2c0eaf04fde04cb40.zip
sci-libs/caffe2: add 2.2.1
Signed-off-by: Alfredo Tupone <tupone@gentoo.org>
-rw-r--r--  sci-libs/caffe2/Manifest                          |   1
-rw-r--r--  sci-libs/caffe2/caffe2-2.2.1.ebuild               | 241
-rw-r--r--  sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch   | 200
3 files changed, 442 insertions, 0 deletions
diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index a05e285f98e0..2a63d8018442 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,3 +1,4 @@
DIST pytorch-1.13.1.tar.gz 108279745 BLAKE2B 75de03b74dfdaf8d8fb5ea743fcc0c1b0e408a714ad4160c487921220a7b1755e5fa6e587e6bbc8c9f34dd75e096d2e6dd69c80d24821835fff6c833314434d3 SHA512 f16f89d027efade11d057245cad5b69a390e88b458398310ae30de2dbff7c8fd7f1165be7b8da7ea989c81ac3f5a66c5cb9050610e441a97c83fb8aa28c0bd62
DIST pytorch-2.0.1.tar.gz 111335778 BLAKE2B 7a10cc2b2d5e2422aef7e060a0c3a62ca5c7460c6e0b9becade9b98939501975c74ed5a175a653731f43ca824d2c9bd31f41d1f633c2b139779ab23d5331e9ce SHA512 2309a22b3be3ccdb36d8d9781a59a7bdcc2fdb8d95ada205702ec77862480f0cbb12cd5d6b8cd3114d01a6e33b7743d0fe9de93debf37138ca5c14403cdb0c43
DIST pytorch-2.1.2.tar.gz 116316469 BLAKE2B c5a55ee264bc3477d3556ba6376b5591117e992e56e0dd0c9ba93d12526e2727f7840f6f1e0730a38223b6492c9556840c4ebf22ffd220e97225c2abff303747 SHA512 a8961d78ad785b13c959a0612563a60e0de17a7c8bb9822ddea9a24072796354d07e81c47b6cc8761b21a6448845b088cf80e1661d9e889b0ed5474d3dc76756
+DIST pytorch-2.2.1.tar.gz 116370903 BLAKE2B 7d08e80f91bad76fba1751c30a34bebfe7145058b7758c0d47112702263a80666f70687a8860744725c6aa995e854f766a5bfa4644c23e5635e7e08c8d63a6e9 SHA512 f19ebcf59d183c3348946ba7cfcab2bc4ca93785863b8edc39dba5772083a7b0425ccb4f92a8df4dc0d18246c75e8ff812993161467fbf9dc48d7fb28a1e26f1
diff --git a/sci-libs/caffe2/caffe2-2.2.1.ebuild b/sci-libs/caffe2/caffe2-2.2.1.ebuild
new file mode 100644
index 000000000000..68328fb71624
--- /dev/null
+++ b/sci-libs/caffe2/caffe2-2.2.1.ebuild
@@ -0,0 +1,241 @@
+# Copyright 2022-2024 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{9..12} )
+inherit python-single-r1 cmake cuda flag-o-matic prefix
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
+ -> ${MYP}.tar.gz"
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64"
+IUSE="cuda distributed fbgemm ffmpeg gloo mkl mpi nnpack +numpy onednn openblas opencl opencv openmp qnnpack xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ ffmpeg? ( opencv )
+ mpi? ( distributed )
+ gloo? ( distributed )
+" # ?? ( cuda rocm )
+
+# CUDA 12 not supported yet: https://github.com/pytorch/pytorch/issues/91122
+RDEPEND="
+ ${PYTHON_DEPS}
+ dev-cpp/gflags:=
+ >=dev-cpp/glog-0.5.0
+ dev-libs/cpuinfo
+ dev-libs/libfmt
+ dev-libs/protobuf:=
+ dev-libs/pthreadpool
+ dev-libs/sleef
+ virtual/lapack
+ >=sci-libs/onnx-1.12.0
+ <sci-libs/onnx-1.15.0
+ sci-libs/foxi
+ cuda? (
+ =dev-libs/cudnn-8*
+ >=dev-libs/cudnn-frontend-0.9.2:0/8
+ dev-util/nvidia-cuda-toolkit:=[profiler]
+ )
+ fbgemm? ( >=dev-libs/FBGEMM-2023.11.02 )
+ ffmpeg? ( media-video/ffmpeg:= )
+ gloo? ( sci-libs/gloo[cuda?] )
+ mpi? ( virtual/mpi )
+ nnpack? ( sci-libs/NNPACK )
+ numpy? ( $(python_gen_cond_dep '
+ dev-python/numpy[${PYTHON_USEDEP}]
+ ') )
+ onednn? ( dev-libs/oneDNN )
+ opencl? ( virtual/opencl )
+ opencv? ( media-libs/opencv:= )
+ qnnpack? ( sci-libs/QNNPACK )
+ distributed? ( sci-libs/tensorpipe[cuda?] )
+ xnnpack? ( >=sci-libs/XNNPACK-2022.12.22 )
+ mkl? ( sci-libs/mkl )
+ openblas? ( sci-libs/openblas )
+"
+DEPEND="
+ ${RDEPEND}
+ cuda? ( >=dev-libs/cutlass-3.1.0 )
+ onednn? ( sci-libs/ideep )
+ dev-libs/psimd
+ dev-libs/FP16
+ dev-libs/FXdiv
+ dev-libs/pocketfft
+ dev-libs/flatbuffers
+ >=sci-libs/kineto-0.4.0_p20231031
+ $(python_gen_cond_dep '
+ dev-python/pyyaml[${PYTHON_USEDEP}]
+ dev-python/pybind11[${PYTHON_USEDEP}]
+ ')
+"
+
+S="${WORKDIR}"/${MYP}
+
+PATCHES=(
+ "${FILESDIR}"/${P}-gentoo.patch
+ "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
+ "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
+ "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
+ "${FILESDIR}"/${PN}-2.0.0-gcc13.patch
+ "${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
+ "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
+ "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
+)
+
+src_prepare() {
+ filter-lto #bug 862672
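+ # Drop the bundled gloo sources from the CMake build; the system
+ # sci-libs/gloo is wired in via USE_SYSTEM_GLOO below.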
+ sed -i \
+ -e "/third_party\/gloo/d" \
+ cmake/Dependencies.cmake \
+ || die
+ cmake_src_prepare
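+ # Regenerate the flatbuffers serialization header from its schema using
+ # the system flatc (provided by dev-libs/flatbuffers).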
+ pushd torch/csrc/jit/serialization || die
+ flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+ popd
+ # prefixify the hardcoded paths, after all patches are applied
+ hprefixify \
+ aten/CMakeLists.txt \
+ caffe2/CMakeLists.txt \
+ cmake/Metal.cmake \
+ cmake/Modules/*.cmake \
+ cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+ cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+ cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+ cmake/public/LoadHIP.cmake \
+ cmake/public/cuda.cmake \
+ cmake/Dependencies.cmake \
+ torch/CMakeLists.txt \
+ CMakeLists.txt
+}
+
+src_configure() {
+ if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+ ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+ ewarn "These may not be optimal for your GPU."
+ ewarn ""
+ ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+ ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+ ewarn "For example, to use CUDA capabilities 7.5 and 3.5, add: TORCH_CUDA_ARCH_LIST=\"7.5 3.5\""
+ ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=\"Maxwell\""
+ ewarn ""
+ ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+ ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+ fi
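+ # Example (hypothetical values): setting TORCH_CUDA_ARCH_LIST="8.6" in
+ # make.conf would target only an Ampere-class GPU such as an RTX 3060.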
+
+ local mycmakeargs=(
+ -DBUILD_CUSTOM_PROTOBUF=OFF
+ -DBUILD_SHARED_LIBS=ON
+
+ -DUSE_CCACHE=OFF
+ -DUSE_CUDA=$(usex cuda)
+ -DUSE_CUDNN=$(usex cuda)
+ -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+ -DBUILD_NVFUSER=$(usex cuda)
+ -DUSE_DISTRIBUTED=$(usex distributed)
+ -DUSE_MPI=$(usex mpi)
+ -DUSE_FAKELOWP=OFF
+ -DUSE_FBGEMM=$(usex fbgemm)
+ -DUSE_FFMPEG=$(usex ffmpeg)
+ -DUSE_GFLAGS=ON
+ -DUSE_GLOG=ON
+ -DUSE_GLOO=$(usex gloo)
+ -DUSE_KINETO=OFF # TODO
+ -DUSE_LEVELDB=OFF
+ -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+ -DUSE_MKLDNN=$(usex onednn)
+ -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+ -DUSE_NNPACK=$(usex nnpack)
+ -DUSE_QNNPACK=$(usex qnnpack)
+ -DUSE_XNNPACK=$(usex xnnpack)
+ -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+ -DUSE_TENSORPIPE=$(usex distributed)
+ -DUSE_PYTORCH_QNNPACK=OFF
+ -DUSE_NUMPY=$(usex numpy)
+ -DUSE_OPENCL=$(usex opencl)
+ -DUSE_OPENCV=$(usex opencv)
+ -DUSE_OPENMP=$(usex openmp)
+ -DUSE_ROCM=OFF # TODO
+ -DUSE_SYSTEM_CPUINFO=ON
+ -DUSE_SYSTEM_PYBIND11=ON
+ -DUSE_UCC=OFF
+ -DUSE_VALGRIND=OFF
+ -DPYBIND11_PYTHON_VERSION="${EPYTHON#python}"
+ -DPYTHON_EXECUTABLE="${PYTHON}"
+ -DUSE_ITT=OFF
+ -DUSE_SYSTEM_PTHREADPOOL=ON
+ -DUSE_SYSTEM_FXDIV=ON
+ -DUSE_SYSTEM_FP16=ON
+ -DUSE_SYSTEM_GLOO=ON
+ -DUSE_SYSTEM_ONNX=ON
+ -DUSE_SYSTEM_SLEEF=ON
+ -DUSE_METAL=OFF
+
+ -Wno-dev
+ -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+ -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+ )
+
+ if use mkl; then
+ mycmakeargs+=(-DBLAS=MKL)
+ elif use openblas; then
+ mycmakeargs+=(-DBLAS=OpenBLAS)
+ else
+ mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+ fi
+
+ if use cuda; then
+ addpredict "/dev/nvidiactl" # bug 867706
+ addpredict "/dev/char"
+
+ mycmakeargs+=(
+ -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+ )
+ fi
+
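+ # Seed the MKLDNN cache variables so CMake links against the system
+ # oneDNN (dnnl) rather than probing for the bundled copy.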
+ if use onednn; then
+ mycmakeargs+=(
+ -DUSE_MKLDNN=ON
+ -DMKLDNN_FOUND=ON
+ -DMKLDNN_LIBRARIES=dnnl
+ -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+ )
+ fi
+
+ cmake_src_configure
+}
+
+src_install() {
+ cmake_src_install
+
+ insinto "/var/lib/${PN}"
+ doins "${BUILD_DIR}"/CMakeCache.txt
+
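+ # The cmake install step drops the caffe2 and torch Python packages
+ # straight into site-packages; reinstall them via python_domodule so
+ # they are byte-compiled for the selected Python implementation.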
+ rm -rf python || die
+ mkdir -p python/torch/include || die
+ mv "${ED}"/usr/lib/python*/site-packages/caffe2 python/ || die
+ cp torch/version.py python/torch/ || die
+ python_domodule python/caffe2
+ python_domodule python/torch
+ ln -s ../../../../../include/torch \
+ "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
+}
diff --git a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
new file mode 100644
index 000000000000..5472a2c41836
--- /dev/null
+++ b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
@@ -0,0 +1,200 @@
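+Use the system copies of QNNPACK, FBGEMM, foxi, fmt, kineto, FXdiv and
+NNPACK instead of the bundled third_party sources, drop forced
+-fPIC/-O3/-Werror flags in favour of the user's CFLAGS, and install the
+functorch library into libdir instead of back into the source tree.
+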
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -474,7 +474,7 @@
+ endif()
+
+ # ---[ QNNPACK
+-if(USE_QNNPACK)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+
+ if(NOT DEFINED QNNPACK_SOURCE_DIR)
+@@ -530,7 +530,7 @@
+ endif()
+
+ # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and neon2sse headers
+-if(USE_QNNPACK)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
+ include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
+@@ -780,7 +780,7 @@
+ endif()
+
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ if(NOT DEFINED FBGEMM_SOURCE_DIR)
+ set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -828,6 +828,7 @@
+ endif()
+
+ if(USE_FBGEMM)
++ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+ caffe2_update_option(USE_FBGEMM ON)
+ else()
+ caffe2_update_option(USE_FBGEMM OFF)
+@@ -1529,7 +1530,6 @@
+ set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
+ endif()
+ endif()
+- add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi EXCLUDE_FROM_ALL)
+
+ add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
+ if(NOT USE_SYSTEM_ONNX)
+@@ -1796,7 +1796,6 @@
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+-add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+
+ # Disable compiler feature checks for `fmt`.
+ #
+@@ -1805,9 +1804,7 @@
+ # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+-set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+
+-list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+
+ # ---[ Kineto
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -89,7 +89,7 @@
+ if(C10_USE_GLOG)
+ target_link_libraries(c10 PUBLIC glog::glog)
+ endif()
+-target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
++target_link_libraries(c10 PRIVATE fmt)
+
+ if(C10_USE_NUMA)
+ target_include_directories(c10 PRIVATE ${Numa_INCLUDE_DIR})
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -59,15 +59,9 @@
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_BINARY_DIR}/aten/src
+ ${CMAKE_BINARY_DIR}/caffe2/aten/src
+- ${CMAKE_BINARY_DIR}/third_party
+- ${CMAKE_BINARY_DIR}/third_party/onnx
+
+- ${TORCH_ROOT}/third_party/valgrind-headers
+
+- ${TORCH_ROOT}/third_party/gloo
+- ${TORCH_ROOT}/third_party/onnx
+- ${TORCH_ROOT}/third_party/flatbuffers/include
+- ${TORCH_ROOT}/third_party/kineto/libkineto/include
++ /usr/include/kineto
+
+ ${TORCH_SRC_DIR}/csrc
+ ${TORCH_SRC_DIR}/csrc/api/include
+@@ -80,7 +74,6 @@
+ python::python
+ pybind::pybind11
+ shm
+- fmt::fmt-header-only
+ ATEN_CPU_FILES_GEN_LIB)
+
+ if(USE_ASAN AND TARGET Sanitizer::address)
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -835,12 +835,11 @@
+ # Re-include to override append_cxx_flag_if_supported from third_party/FBGEMM
+ include(cmake/public/utils.cmake)
+ if(NOT MSVC)
+- string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++ string(APPEND CMAKE_CXX_FLAGS " -O2")
+ # Eigen fails to build with some versions, so convert this to a warning
+ # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+ string(APPEND CMAKE_CXX_FLAGS " -Wall")
+ string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+- append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+@@ -930,7 +929,6 @@
+ string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
+ append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+- append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+ else()
+ # skip unwanted includes from windows.h
+ add_compile_definitions(WIN32_LEAN_AND_MEAN)
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -486,8 +486,6 @@
+ endif()
+
+ # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+- target_compile_options(${libname} PRIVATE
+- $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+
+ endfunction()
+
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -57,7 +57,7 @@
+ if(MSVC)
+ set(OPT_FLAG "/fp:strict ")
+ else(MSVC)
+- set(OPT_FLAG "-O3 ")
++ set(OPT_FLAG " ")
+ if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+ set(OPT_FLAG " ")
+ endif()
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -107,7 +107,7 @@
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+ if(NOT TARGET fxdiv)
+ set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+ set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1055,7 +1055,6 @@
+ endif()
+
+ if(NOT MSVC AND USE_XNNPACK)
+- TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+
+ # ==========================================================
+@@ -1175,8 +1174,7 @@
+ target_include_directories(torch_cpu PRIVATE
+ ${TORCH_ROOT}/third_party/miniz-2.1.0)
+
+-target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/kineto/libkineto/include)
++target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
+
+ if(USE_KINETO)
+ target_include_directories(torch_cpu PRIVATE
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -56,7 +56,7 @@
+ set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+ set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+
+- if(NOT TARGET nnpack)
++ if(FALSE)
+ if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
+ set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
+ endif()
+--- a/functorch/CMakeLists.txt 2023-11-30 20:30:45.805209036 +0100
++++ b/functorch/CMakeLists.txt 2023-11-30 20:31:13.284766157 +0100
+@@ -35,4 +35,4 @@
+ if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
+ set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS ${TORCH_PYTHON_LINK_FLAGS})
+ endif()
+-install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
++install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")