Random permutation on the GPU with Thrust

Problem description

I am trying to write code that permutes a vector on the GPU, but I am having trouble getting Thrust to cooperate. The code below currently compiles fine, but it does nothing to the order of the vector r. Please help. Thanks!

void rng_permutation<float>(const int n, float* r){

  float* order;
  cudaMalloc((void**)&order, n* sizeof(float));

  /*
  some lines of code that generate uniform random floats between 0 and 1 that I know work
  */

  thrust::device_ptr<float> order_(order);
  thrust::device_vector<float> order__(order_, order_ + n);
  thrust::device_ptr<float> r_(r);
  thrust::device_vector<float> r__(r_, r_ + n);

  thrust::sort_by_key(order__.begin(), order__.end(), r__.begin());

  thrust::copy(order_, order_ + n, order__.begin());
  thrust::copy(r_, r_ + n, r__.begin()); 


  cudaFree(order);

  order__.clear();
  r__.clear();
  thrust::device_vector<float>().swap(order__);
  thrust::device_vector<float>().swap(r__);

}

Tags: cuda, gpu, thrust

Solution


You have your source and destination backwards here:

thrust::copy(order_, order_ + n, order__.begin());
thrust::copy(r_, r_ + n, r__.begin()); 

The preceding line of code has just sorted order__. You then copy the unsorted contents of order_ on top of it (the first parameter to thrust::copy is the source, the last is the destination). That makes no sense. Instead, reverse it:

  thrust::copy(order__.begin(), order__.end(), order_);
  thrust::copy(r__.begin(), r__.end(), r_);
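
As an aside, the intermediate device_vector copies (and the copy-back step) can be avoided altogether: thrust::device_ptr is itself a valid Thrust iterator, so sort_by_key can operate directly on the original allocations. A minimal sketch, assuming order and r are the same cudaMalloc'd device pointers as in the question:

  thrust::device_ptr<float> order_(order);
  thrust::device_ptr<float> r_(r);
  // sorts the original device storage in place; no copy back is needed
  thrust::sort_by_key(order_, order_ + n, r_);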

With the copy direction reversed in the original code, you get sensible results:

$ cat t312.cu
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <iostream>
#include <thrust/sequence.h>

template <typename T>
void caffe_gpu_rng_uniform(int n, T lo, T hi, T *o)
{
  T *d = (T *)malloc(n*sizeof(T));
  for (int i = 0; i < n; i++) d[i] = (rand()/(float)RAND_MAX)*(hi-lo) + lo;
  cudaMemcpy(o, d, n*sizeof(T), cudaMemcpyHostToDevice);
  free(d);
};

template <typename T>
void print_gpu_array_entries(T *o, int x , int y , int n){
  thrust::copy_n(thrust::device_pointer_cast<T>(o), x, std::ostream_iterator<T>(std::cout, ","));
  std::cout << std::endl;
}

void rng_permutation(const int n, float* r){

  float* order;
  cudaMalloc((void**)&order, n* sizeof(float));

  caffe_gpu_rng_uniform<float>(n, (float)0.0, (float)1.0, order);


  print_gpu_array_entries<float>(order, 10 , 1 , n);
  print_gpu_array_entries<float>(r, 10 , 1 , n);

  thrust::device_ptr<float> order_(order);
  thrust::device_vector<float> order__(order_, order_ + n);
  thrust::device_ptr<float> r_(r);
  thrust::device_vector<float> r__(r_, r_ + n);

  thrust::sort_by_key(order__.begin(), order__.end(), r__.begin());

  thrust::copy(order__.begin(), order__.end(), order_);
  thrust::copy(r__.begin(), r__.end(), r_);

  print_gpu_array_entries<float>(order, 10 , 1 , n);
  print_gpu_array_entries<float>(r, 10 , 1 , n);

  cudaFree(order);

  order__.clear();
  r__.clear();
  thrust::device_vector<float>().swap(order__);
  thrust::device_vector<float>().swap(r__);

}

int main(){

  thrust::device_vector<float> data(10);
  thrust::sequence(data.begin(), data.end());
  rng_permutation(10, thrust::raw_pointer_cast(data.data()));
}
$ nvcc -o t312 t312.cu
$ ./t312
0.840188,0.394383,0.783099,0.79844,0.911647,0.197551,0.335223,0.76823,0.277775,0.55397,
0,1,2,3,4,5,6,7,8,9,
0.197551,0.277775,0.335223,0.394383,0.55397,0.76823,0.783099,0.79844,0.840188,0.911647,
5,8,6,1,9,7,2,3,0,4,
$
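
As a further aside, Thrust versions that ship <thrust/shuffle.h> (1.9.6 and newer) provide thrust::shuffle, which produces a random permutation directly, so the sort-by-random-keys approach is not needed there. A minimal sketch, assuming such a Thrust version is available:

#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/shuffle.h>
#include <thrust/random.h>

int main(){
  thrust::device_vector<float> data(10);
  thrust::sequence(data.begin(), data.end());
  thrust::default_random_engine g(1234);        // seed controls reproducibility
  thrust::shuffle(data.begin(), data.end(), g); // permutes data in place on the device
}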
