How to make a PyTorch extension with cmake

Problem description

This tutorial demonstrates how to make a C++/CUDA-based Python extension for PyTorch. But, for ... reasons ..., my use case is more complicated than that and doesn't fit neatly into the Python setuptools framework the tutorial describes.

Is there a way to use cmake to compile a Python library that extends PyTorch?

Tags: cmake, pytorch

Solution


Yes.

The trick is to have cmake gather together all the C++ and CUDA files we need and to use PyBind11 to build the interface we want; luckily, PyBind11 is included with PyTorch.

The code below is collected and kept up to date in this GitHub repository.

Our project consists of several files:

CMakeLists.txt

cmake_minimum_required (VERSION 3.9)

project(pytorch_cmake_example LANGUAGES CXX CUDA)

find_package(Python REQUIRED COMPONENTS Development)
find_package(Torch REQUIRED)
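# TorchConfig ships with the PyTorch install (pointed to via CMAKE_PREFIX_PATH
# in the configure command below) and defines the TORCH_LIBRARIES we link against.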

# Modify if you need a different default value
if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
  set(CMAKE_CUDA_ARCHITECTURES 61)
endif()

# List all your code files here
add_library(pytorch_cmake_example SHARED
  main.cu
)
target_compile_features(pytorch_cmake_example PRIVATE cxx_std_11)
target_link_libraries(pytorch_cmake_example PRIVATE ${TORCH_LIBRARIES} Python::Python)

# Use if the default GCC version gives issues.
# Similar syntax is used if we need better compilation flags.
target_compile_options(pytorch_cmake_example PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:-ccbin g++-9>)

# Use a variant of this if you're on an earlier cmake than 3.18
# target_compile_options(pytorch_cmake_example PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:-gencode arch=compute_61,code=sm_61>)

main.cu

#include <algorithm>  // std::min

#include <c10/cuda/CUDAException.h>
#include <torch/extension.h>
#include <torch/library.h>

using namespace at;


int64_t integer_round(int64_t num, int64_t denom){
  return (num + denom - 1) / denom;
}


template<class T>
__global__ void add_one_kernel(const T *const input, T *const output, const int64_t N){
  // Grid-strided loop
  for(int i=blockDim.x*blockIdx.x+threadIdx.x;i<N;i+=blockDim.x*gridDim.x){
    output[i] = input[i] + 1;
  }
}


///Adds one to each element of a tensor
Tensor add_one(const Tensor &input){
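  // zeros_like allocates the output with the same shape, dtype and device as the input.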
  auto output = torch::zeros_like(input);

  // Common values:
  // AT_DISPATCH_INDEX_TYPES
  // AT_DISPATCH_FLOATING_TYPES
  // AT_DISPATCH_INTEGRAL_TYPES
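  // Each AT_DISPATCH_* macro instantiates the lambda below once per supported
  // dtype, with `scalar_t` defined as the corresponding C++ type.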
  AT_DISPATCH_ALL_TYPES(
    input.scalar_type(), "add_one_cuda", [&](){
      const auto block_size = 128;
      const auto num_blocks = std::min<int64_t>(65535, integer_round(input.numel(), block_size));
      add_one_kernel<<<num_blocks, block_size>>>(
        input.data_ptr<scalar_t>(),
        output.data_ptr<scalar_t>(),
        input.numel()
      );
      // Always test your kernel launches
      C10_CUDA_KERNEL_LAUNCH_CHECK();
    }
  );

  return output;
}


///Note that we can have multiple implementations spread across multiple files, though there should only be one `def`
TORCH_LIBRARY(pytorch_cmake_example, m) {
  m.def("add_one(Tensor input) -> Tensor");
  m.impl("add_one", c10::DispatchKey::CUDA, TORCH_FN(add_one));
  //c10::DispatchKey::CPU is also an option
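  // Calling the op with a CPU tensor will raise an error until a CPU kernel is registered.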
}

Compilation

From a build/ subdirectory of the project, configure it with the following command, then build with ninja:

cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_PREFIX_PATH=`python -c 'import torch;print(torch.utils.cmake_prefix_path)'` -GNinja ..
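
The backticked python -c call simply asks the installed torch package where its cmake config files live. If the inline quoting is awkward in your shell, you can print the same value separately and pass it to -DCMAKE_PREFIX_PATH by hand:

# Prints the path to the cmake config files bundled with the installed torch
# package; the configure command above embeds exactly this call.
import torch
print(torch.utils.cmake_prefix_path)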

test.py

You can then run the following test script.

import torch
torch.ops.load_library("build/libpytorch_cmake_example.so")

shape = (3,3,3)
a = torch.randint(0, 10, shape, dtype=torch.float).cuda()
a_plus_one = torch.ops.pytorch_cmake_example.add_one(a)
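
As a quick sanity check, the op's output can be compared against PyTorch's built-in arithmetic:

# The custom CUDA kernel should match adding 1 with plain tensor arithmetic exactly.
print(a_plus_one)
assert torch.equal(a_plus_one, a + 1)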
