Differences between threadIdx, blockIdx, blockDim, and gridDim in CUDA

1. Differences and usage examples

  • threadIdx is of type uint3 and gives the index of a thread within its block.
  • blockIdx is of type uint3 and gives the index of a thread block within the grid; a block usually contains many threads.
  • blockDim is of type dim3 and gives the dimensions of a block, i.e. how many threads it holds along each axis.
  • gridDim is of type dim3 and gives the dimensions of the grid, i.e. how many blocks it holds along each axis. (A short sketch combining all four follows this list.)
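
Before the full listing, here is a minimal sketch (the kernel name and launch shape are hypothetical, assuming a 2D grid of 2D blocks) of how the four built-ins combine into one global linear index. It can be pasted into the same file as the program below; unlike the kernels in that program, it also guards against out-of-range threads:

__global__ void sketchKernel(int *c, const int *a, const int *b, int n)
{
    // global 2D coordinates of this thread
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // flatten to a linear index over the whole grid
    int i = y * (gridDim.x * blockDim.x) + x;
    if (i < n)   // bounds check, in case the grid holds more threads than n
        c[i] = b[i] - a[i];
}

// launch: gridDim = (4,4,1), blockDim = (16,16,1), i.e. 16 blocks of 256 threads
// sketchKernel<<<dim3(4, 4), dim3(16, 16)>>>(dev_c, dev_a, dev_b, n);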

CUDA specifies the launch configuration, and therefore how threads are indexed, through the <<< >>> syntax. The program below demonstrates 15 indexing schemes:

#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

using namespace std;

//thread 1D
__global__ void testThread1(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = b[i] - a[i];
}

//thread 2D
__global__ void testThread2(int *c, const int *a, const int *b)
{
    int i = threadIdx.x + threadIdx.y*blockDim.x;
    c[i] = b[i] - a[i];
}

//thread 3D
__global__ void testThread3(int *c, const int *a, const int *b)
{
    int i = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.x*blockDim.y;
    c[i] = b[i] - a[i];
}

//block 1D
__global__ void testBlock1(int *c, const int *a, const int *b)
{
    int i = blockIdx.x;
    c[i] = b[i] - a[i];
}

//block 2D
__global__ void testBlock2(int *c, const int *a, const int *b)
{
    int i = blockIdx.x + blockIdx.y*gridDim.x;
    c[i] = b[i] - a[i];
}

//block 3D
__global__ void testBlock3(int *c, const int *a, const int *b)
{
    int i = blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y;
    c[i] = b[i] - a[i];
}

//block-thread 1D-1D
__global__ void testBlockThread1(int *c, const int *a, const int *b)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    c[i] = b[i] - a[i];
}

//block-thread 1D-2D
__global__ void testBlockThread2(int *c, const int *a, const int *b)
{
    int threadId_2D = threadIdx.x + threadIdx.y*blockDim.x;
    int i = threadId_2D+ (blockDim.x*blockDim.y)*blockIdx.x;
    c[i] = b[i] - a[i];
}

//block-thread 1D-3D
__global__ void testBlockThread3(int *c, const int *a, const int *b)
{
    int threadId_3D = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.x*blockDim.y;
    int i = threadId_3D + (blockDim.x*blockDim.y*blockDim.z)*blockIdx.x;
    c[i] = b[i] - a[i];
}

//block-thread 2D-1D
__global__ void testBlockThread4(int *c, const int *a, const int *b)
{
    int blockId_2D = blockIdx.x + blockIdx.y*gridDim.x;
    int i = threadIdx.x + blockDim.x*blockId_2D;
    c[i] = b[i] - a[i];
}

//block-thread 3D-1D
__global__ void testBlockThread5(int *c, const int *a, const int *b)
{
    int blockId_3D = blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y;
    int i = threadIdx.x + blockDim.x*blockId_3D;
    c[i] = b[i] - a[i];
}

//block-thread 2D-2D
__global__ void testBlockThread6(int *c, const int *a, const int *b)
{
    int threadId_2D = threadIdx.x + threadIdx.y*blockDim.x;
    int blockId_2D = blockIdx.x + blockIdx.y*gridDim.x;
    int i = threadId_2D + (blockDim.x*blockDim.y)*blockId_2D;
    c[i] = b[i] - a[i];
}

//block-thread 2D-3D
__global__ void testBlockThread7(int *c, const int *a, const int *b)
{
    int threadId_3D = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.x*blockDim.y;
    int blockId_2D = blockIdx.x + blockIdx.y*gridDim.x;
    int i = threadId_3D + (blockDim.x*blockDim.y*blockDim.z)*blockId_2D;
    c[i] = b[i] - a[i];
}

//block-thread 3D-2D
__global__ void testBlockThread8(int *c, const int *a, const int *b)
{
    int threadId_2D = threadIdx.x + threadIdx.y*blockDim.x;
    int blockId_3D = blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y;
    int i = threadId_2D + (blockDim.x*blockDim.y)*blockId_3D;
    c[i] = b[i] - a[i];
}

//block-thread 3D-3D
__global__ void testBlockThread9(int *c, const int *a, const int *b)
{
    int threadId_3D = threadIdx.x + threadIdx.y*blockDim.x + threadIdx.z*blockDim.x*blockDim.y;
    int blockId_3D = blockIdx.x + blockIdx.y*gridDim.x + blockIdx.z*gridDim.x*gridDim.y;
    int i = threadId_3D + (blockDim.x*blockDim.y*blockDim.z)*blockId_3D;
    c[i] = b[i] - a[i];
}


void addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;

    cudaSetDevice(0);   // use GPU 0; return values of the CUDA calls below are not checked, for brevity

    cudaMalloc((void**)&dev_c, size * sizeof(int));
    cudaMalloc((void**)&dev_a, size * sizeof(int));
    cudaMalloc((void**)&dev_b, size * sizeof(int));

    cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);

    //testThread1<<<1, size>>>(dev_c, dev_a, dev_b);

    //uint3 s;s.x = size/5;s.y = 5;s.z = 1;
    //testThread2 <<<1,s>>>(dev_c, dev_a, dev_b);

    //uint3 s; s.x = size / 10; s.y = 5; s.z = 2;
    //testThread3<<<1, s >>>(dev_c, dev_a, dev_b);

    //testBlock1<<<size,1 >>>(dev_c, dev_a, dev_b);

    //uint3 s; s.x = size / 5; s.y = 5; s.z = 1;
    //testBlock2<<<s, 1 >>>(dev_c, dev_a, dev_b);

    //uint3 s; s.x = size / 10; s.y = 5; s.z = 2;
    //testBlock3<<<s, 1 >>>(dev_c, dev_a, dev_b);

    //testBlockThread1<<<size/10, 10>>>(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = size / 100; s1.y = 1; s1.z = 1;
    //uint3 s2; s2.x = 10; s2.y = 10; s2.z = 1;
    //testBlockThread2 << <s1, s2 >> >(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = size / 100; s1.y = 1; s1.z = 1;
    //uint3 s2; s2.x = 10; s2.y = 5; s2.z = 2;
    //testBlockThread3 << <s1, s2 >> >(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = 10; s1.y = 10; s1.z = 1;
    //uint3 s2; s2.x = size / 100; s2.y = 1; s2.z = 1;
    //testBlockThread4 << <s1, s2 >> >(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = 10; s1.y = 5; s1.z = 2;
    //uint3 s2; s2.x = size / 100; s2.y = 1; s2.z = 1;
    //testBlockThread5 << <s1, s2 >> >(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = size / 100; s1.y = 10; s1.z = 1;
    //uint3 s2; s2.x = 5; s2.y = 2; s2.z = 1;
    //testBlockThread6 << <s1, s2 >> >(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = size / 100; s1.y = 5; s1.z = 1;
    //uint3 s2; s2.x = 5; s2.y = 2; s2.z = 2;
    //testBlockThread7 << <s1, s2 >> >(dev_c, dev_a, dev_b);

    //uint3 s1; s1.x = 5; s1.y = 2; s1.z = 2;
    //uint3 s2; s2.x = size / 100; s2.y = 5; s2.z = 1;
    //testBlockThread8 <<<s1, s2 >>>(dev_c, dev_a, dev_b);

    // dim3 is the idiomatic type for a launch configuration; uint3 also works here
    // because dim3 can be constructed from a uint3.
    // 5*2*2 = 20 blocks of (size/200)*5*2 = 50 threads each, i.e. 1000 threads for size = 1000.
    uint3 s1; s1.x = 5; s1.y = 2; s1.z = 2;
    uint3 s2; s2.x = size / 200; s2.y = 5; s2.z = 2;
    testBlockThread9<<<s1, s2>>>(dev_c, dev_a, dev_b);

    cudaMemcpy(c, dev_c, size*sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    cudaGetLastError();
}


int main()
{
    const int n = 1000;

    int *a = new int[n];
    int *b = new int[n];
    int *c = new int[n];
    int *cc = new int[n];

    for (int i = 0; i < n; i++)
    {
        a[i] = rand() % 100;
        b[i] = rand() % 100;
        c[i] = b[i] - a[i];
    }

    addWithCuda(cc, a, b, n);

    FILE *fp = fopen("out.txt", "w");
    for (int i = 0; i < n; i++)
        fprintf(fp, "%d %d\n", c[i], cc[i]);
    fclose(fp);

    bool flag = true;
    for (int i = 0; i < n; i++)
    {
        if (c[i] != cc[i])
        {
            flag = false;
            break;
        }
    }

    if (flag == false)
        printf("no pass");
    else
        printf("pass");

    cudaDeviceReset();

    delete[] a;
    delete[] b;
    delete[] c;
    delete[] cc;

    getchar();
    return 0;
}

The full global thread ID in the x and y dimensions can be computed as follows:
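
Using the standard CUDA convention (block index times block size plus the thread index within the block):

int globalIdx_x = blockIdx.x * blockDim.x + threadIdx.x;   // global thread ID in x
int globalIdx_y = blockIdx.y * blockDim.y + threadIdx.y;   // global thread ID in y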

 


Implementing a max-heap / min-heap

#include <iostream>
#include <vector>

using namespace std;

template<typename P>
class Heap {
private:
    vector<P> heap_elem{-1};   // heap storage; the sentinel at index 0 makes the heap 1-indexed
    int maxmin_heap;           // +1 for a max-heap, -1 for a min-heap: values are stored multiplied
                               // by this factor, so internally the structure is always a max-heap

    void up_adjust(int now) {  // sift a newly inserted element up (stored values form a max-heap)
        while (now > 1 && heap_elem[now] > heap_elem[now / 2]) {
            swap(heap_elem[now], heap_elem[now / 2]);
            now = now / 2;
        }
    };

    void down_adjust(P &top, int now) {  // place 'top' at position 'now', then sift it down
        heap_elem[now] = top;
        int next;
        while (now * 2 <= heap_elem.size() - 1) {
            if (now * 2 + 1 > heap_elem.size() - 1) {
                next = now * 2;
            } else {
                next = heap_elem[now * 2] > heap_elem[now * 2 + 1] ? now * 2 : now * 2 + 1;
            }
            if (heap_elem[next] > heap_elem[now]) {
                swap(heap_elem[next], heap_elem[now]);
                now = next;
            } else {
                break;
            }
        }
    };

public:
    Heap(int m) {              // construct a heap; pass +1 for a max-heap or -1 for a min-heap
        maxmin_heap = m;
    };

    P getTop() {               // return the top element without removing it
        if (heap_elem.size() > 1)
            return heap_elem[1] * maxmin_heap;
        return heap_elem[0];   // heap is empty: return the sentinel
    }

    bool empty() {             // true if the heap holds no elements (only the sentinel remains)
        return heap_elem.size() == 1;
    }

    void push(P x) {           // insert an element
        heap_elem.push_back(x * maxmin_heap);
        int now = heap_elem.size() - 1;
        up_adjust(now);
    };

    P pop() {                  // remove and return the top element
        P res = getTop();
        if (empty())           // nothing to pop: return the sentinel
            return res;
        P top = heap_elem.back();
        heap_elem.pop_back();
        if (!empty())          // re-seat the last element only if elements remain
            down_adjust(top, 1);
        cout << "pop " << res << " successfully." << endl;
        return res;
    };

    int size() {
        return heap_elem.size() - 1;
    }

    void print() {
        for (int i = 1; i < heap_elem.size(); i++) {
            cout << heap_elem[i] * maxmin_heap << ' ';
        }
        cout << endl;
    }

};

int main() {
    // a top-k style test; verified to work
    int max_heap = 1, min_heap = -1;
    Heap<int> h(min_heap);     // pass max_heap instead to build a max-heap
    int x;
    for (int i = 0; i < 10; ++i) {
        x = rand() % 100;
        h.push(x);
        cout << "x: " << x << endl;
        h.print();
    }
    h.pop();
    h.print();
    return 0;
}

 
