Compare commits

...

10 Commits

Author SHA1 Message Date
Allard Hendriksen
74fc52ccac
Merge fc06e3de184464ba22f08daeb2cc98aa4f7e624f into f3b7c41ad6202902290d2bbd16428455b3ee375a 2025-02-22 21:12:47 -08:00
Jonathan Bentz
f3b7c41ad6
cudaNvSci: Update README.md fixing typo (#337)
Fixes #193
2025-02-21 09:21:43 -08:00
Jonathan Bentz
29fb758e62
conjugateGradient: Ensure allocated memory is freed (#336)
Fixes #202
2025-02-21 09:20:53 -08:00
Jonathan Bentz
3bc08136ff
Update README.md link for sortingNetworks (#335)
Fixes #302
2025-02-21 09:19:21 -08:00
Jonathan Bentz
85eefa06c4
boxFilter: Remove unused parameter (#338)
Fixes: #122
2025-02-21 09:17:45 -08:00
XSShawnZeng
c357dd1e6b
Fixing issue #321: A potential bug in memMapIPCDrv/memMapIpc.cpp (#334) 2025-02-21 09:14:25 -08:00
Jonathan Bentz
efb46383e0
Transpose: Change TILE_DIM to 32 to fix bank conflicts
Fixes #175
2025-02-20 15:46:44 -08:00
Allard Hendriksen
fc06e3de18
Make expected failures more obvious 2023-08-07 17:40:35 +02:00
Allard Hendriksen
a3b5b817e3
Use CTK-provided type for cuTensorMapEncodeTiled 2023-08-07 17:36:27 +02:00
Allard Hendriksen
5925483b33
Add TMA example 2023-06-30 17:39:53 +02:00
10 changed files with 492 additions and 30 deletions

View File

@@ -77,7 +77,6 @@ int filter_radius = 14;
int nthreads = 64;
unsigned int width, height;
unsigned int *h_img = NULL;
unsigned int *d_img = NULL;
unsigned int *d_temp = NULL;
GLuint pbo; // OpenGL pixel buffer object
@@ -108,11 +107,11 @@ extern "C" void computeGold(float *id, float *od, int w, int h, int n);
// These are CUDA functions to handle allocation and launching the kernels
extern "C" void initTexture(int width, int height, void *pImage, bool useRGBA);
extern "C" void freeTextures();
extern "C" double boxFilter(float *d_src, float *d_temp, float *d_dest,
extern "C" double boxFilter(float *d_temp, float *d_dest,
int width, int height, int radius, int iterations,
int nthreads, StopWatchInterface *timer);
extern "C" double boxFilterRGBA(unsigned int *d_src, unsigned int *d_temp,
extern "C" double boxFilterRGBA(unsigned int *d_temp,
unsigned int *d_dest, int width, int height,
int radius, int iterations, int nthreads,
StopWatchInterface *timer);
@@ -165,7 +164,7 @@ void display() {
size_t num_bytes;
checkCudaErrors(cudaGraphicsResourceGetMappedPointer(
(void **)&d_result, &num_bytes, cuda_pbo_resource));
boxFilterRGBA(d_img, d_temp, d_result, width, height, filter_radius,
boxFilterRGBA(d_temp, d_result, width, height, filter_radius,
iterations, nthreads, kernel_timer);
checkCudaErrors(cudaGraphicsUnmapResources(1, &cuda_pbo_resource, 0));
@@ -282,11 +281,7 @@ void reshape(int x, int y) {
}
void initCuda(bool useRGBA) {
// allocate device memory
checkCudaErrors(
cudaMalloc((void **)&d_img, (width * height * sizeof(unsigned int))));
checkCudaErrors(
cudaMalloc((void **)&d_temp, (width * height * sizeof(unsigned int))));
checkCudaErrors(cudaMalloc((void **)&d_temp, (width * height * sizeof(unsigned int))));
// Refer to boxFilter_kernel.cu for implementation
initTexture(width, height, h_img, useRGBA);
@@ -304,11 +299,6 @@ void cleanup() {
h_img = NULL;
}
if (d_img) {
cudaFree(d_img);
d_img = NULL;
}
if (d_temp) {
cudaFree(d_temp);
d_temp = NULL;
@@ -413,7 +403,7 @@ int runBenchmark() {
cudaMalloc((void **)&d_result, width * height * sizeof(unsigned int)));
// warm-up
boxFilterRGBA(d_img, d_temp, d_temp, width, height, filter_radius, iterations,
boxFilterRGBA(d_temp, d_temp, width, height, filter_radius, iterations,
nthreads, kernel_timer);
checkCudaErrors(cudaDeviceSynchronize());
@@ -426,7 +416,7 @@
for (int i = 0; i < iCycles; i++) {
dProcessingTime +=
boxFilterRGBA(d_img, d_temp, d_img, width, height, filter_radius,
boxFilterRGBA(d_temp, d_temp, width, height, filter_radius,
iterations, nthreads, kernel_timer);
}
@@ -469,7 +459,7 @@ int runSingleTest(char *ref_file, char *exec_path) {
{
printf("%s (radius=%d) (passes=%d) ", sSDKsample, filter_radius,
iterations);
boxFilterRGBA(d_img, d_temp, d_result, width, height, filter_radius,
boxFilterRGBA(d_temp, d_result, width, height, filter_radius,
iterations, nthreads, kernel_timer);
// check if kernel execution generated an error

View File

@@ -399,7 +399,6 @@ extern "C" void freeTextures() {
Perform 2D box filter on image using CUDA
Parameters:
d_src - pointer to input image in device memory
d_temp - pointer to temporary storage in device memory
d_dest - pointer to destination image in device memory
width - image width
@@ -408,7 +407,7 @@ extern "C" void freeTextures() {
iterations - number of iterations
*/
extern "C" double boxFilter(float *d_src, float *d_temp, float *d_dest,
extern "C" double boxFilter(float *d_temp, float *d_dest,
int width, int height, int radius, int iterations,
int nthreads, StopWatchInterface *timer) {
// var for kernel timing
@@ -447,7 +446,7 @@ extern "C" double boxFilter(float *d_src, float *d_temp, float *d_dest,
}
// RGBA version
extern "C" double boxFilterRGBA(unsigned int *d_src, unsigned int *d_temp,
extern "C" double boxFilterRGBA(unsigned int *d_temp,
unsigned int *d_dest, int width, int height,
int radius, int iterations, int nthreads,
StopWatchInterface *timer) {

View File

@@ -2,7 +2,7 @@
## Description
This sample implements bitonic sort and odd-even merge sort (also known as Batcher's sort), algorithms belonging to the class of sorting networks. While generally subefficient, for large sequences compared to algorithms with better asymptotic algorithmic complexity (i.e. merge sort or radix sort), this may be the preferred algorithms of choice for sorting batches of short-sized to mid-sized (key, value) array pairs. Refer to an excellent tutorial by H. W. Lang http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/indexen.htm
This sample implements bitonic sort and odd-even merge sort (also known as Batcher's sort), algorithms belonging to the class of sorting networks. While generally subefficient, for large sequences compared to algorithms with better asymptotic algorithmic complexity (i.e. merge sort or radix sort), this may be the preferred algorithms of choice for sorting batches of short-sized to mid-sized (key, value) array pairs. Refer to an excellent tutorial by H. W. Lang https://hwlang.de/algorithmen/sortieren/bitonic/bitonicen.htm
## Key Concepts
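For background on the description above: sorting networks consist of data-independent compare-exchange steps, which is what makes them map well onto GPU threads. Below is a minimal host-side sketch of bitonic sort for a power-of-two array (illustrative only; the sample's kernels distribute these same steps across thread blocks and shared memory).

#include <algorithm>
#include <cstdio>

// Bitonic sort network: every compare-exchange is independent of the data,
// so each pass of the inner loop can be executed by many threads in parallel.
void bitonicSort(int *a, int n) {  // n must be a power of two
  for (int k = 2; k <= n; k <<= 1) {        // length of bitonic subsequences
    for (int j = k >> 1; j > 0; j >>= 1) {  // compare-exchange distance
      for (int i = 0; i < n; ++i) {
        int partner = i ^ j;
        if (partner > i) {
          bool up = ((i & k) == 0);  // sort direction of this subsequence
          if (up ? (a[i] > a[partner]) : (a[i] < a[partner]))
            std::swap(a[i], a[partner]);
        }
      }
    }
  }
}

int main() {
  int a[8] = {7, 3, 5, 0, 6, 2, 4, 1};
  bitonicSort(a, 8);
  for (int v : a) printf("%d ", v);  // prints: 0 1 2 3 4 5 6 7
  printf("\n");
  return 0;
}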

View File

@@ -0,0 +1,49 @@
# globalToShmemTMACopy - Global Memory to Shared Memory TMA Copy
## Description
This sample shows how to use the CUDA driver API and inline PTX assembly to copy
a 2D tile of a tensor into shared memory. It also demonstrates arrive-wait
barrier for synchronization.
## Key Concepts
CUDA Runtime API, CUDA Driver API, PTX ISA, CPP11 CUDA
## Supported SM Architectures
This sample requires compute capability 9.0 or higher.
[SM 9.0](https://developer.nvidia.com/cuda-gpus)
## Supported OSes
Linux, Windows, QNX
## Supported CPU Architecture
x86_64, ppc64le, armv7l, aarch64
## CUDA APIs involved
### [CUDA Runtime API](http://docs.nvidia.com/cuda/cuda-runtime-api/index.html)
cudaMalloc, cudaMemcpy, cudaFree, cudaDeviceSynchronize
### [CUDA Driver API](http://docs.nvidia.com/cuda/cuda-driver-api/index.html)
cuTensorMapEncodeTiled
### [CUDA PTX ISA](https://docs.nvidia.com/cuda/parallel-thread-execution/index.html)
## Dependencies needed to build/run
[CPP11](../../../README.md#cpp11)
## Prerequisites
Download and install the [CUDA Toolkit 12.2](https://developer.nvidia.com/cuda-downloads) for your corresponding platform.
Make sure the dependencies mentioned in [Dependencies]() section above are installed.
## Build and Run
## References (for more details)

View File

@@ -0,0 +1,354 @@
/* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* This sample demonstrates how to:
*
* - Create a TensorMap (TMA descriptor)
* - Load a 2D tile of data into shared memory
*
* Compile and run with:
*
* nvcc -arch sm_90 -run globalToShmemTMACopy.cu
*
* The compiler may issue the following note; it can be safely ignored.
*
* note: the ABI for passing parameters with 64-byte alignment has changed in
* GCC 4.6
*
*/
#include <cstdio> // printf
#include <vector> // std::vector
#include <cudaTypedefs.h> // PFN_cuTensorMapEncodeTiled
#include <cuda.h> // CUtensormap
#include <cuda_awbarrier_primitives.h> // __mbarrier_*
#include "util.h" // CUDA_CHECK macro
/*
* Constants.
*/
constexpr size_t W_global = 1024; // Width of tensor (in # elements)
constexpr size_t H_global = 1024; // Height of tensor (in # elements)
constexpr int SMEM_W = 32; // Width of shared memory buffer (in # elements)
constexpr int SMEM_H = 8; // Height of shared memory buffer (in # elements)
/*
* CUDA Driver API
*/
PFN_cuTensorMapEncodeTiled get_cuTensorMapEncodeTiled() {
void* driver_ptr = nullptr;
cudaDriverEntryPointQueryResult driver_status;
CUDA_CHECK(cudaGetDriverEntryPoint("cuTensorMapEncodeTiled", &driver_ptr, cudaEnableDefault, &driver_status));
return reinterpret_cast<PFN_cuTensorMapEncodeTiled>(driver_ptr);
}
/*
* PTX wrappers
*/
inline __device__ __mbarrier_token_t barrier_arrive1_tx(
__mbarrier_t *barrier, uint32_t expected_tx_count
)
{
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-mbarrier-arrive
__mbarrier_token_t token;
asm volatile("mbarrier.arrive.expect_tx.release.cta.shared::cta.b64 %0, [%1], %2;"
: "=l"(token)
: "r"(static_cast<unsigned int>(__cvta_generic_to_shared(barrier))), "r"(expected_tx_count)
: "memory");
return token;
}
inline __device__ bool barrier_try_wait_token(__mbarrier_t *barrier, __mbarrier_token_t token)
{
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#parallel-synchronization-and-communication-instructions-mbarrier-test-wait-try-wait
//
// This function returns a bool, so that software can retry.
//
// The HW only provides best-effort waiting support. The wait time is limited
// by the HW capability, after which a fail occurs, in which case the SW is
// responsible for retrying.
int __ready;
asm volatile("{\n\t"
".reg .pred p;\n\t"
"mbarrier.try_wait.acquire.cta.shared::cta.b64 p, [%1], %2;\n\t"
"selp.b32 %0, 1, 0, p;\n\t"
"}"
: "=r"(__ready)
: "r"(static_cast<unsigned int>(__cvta_generic_to_shared(barrier))),
"l"(token)
: "memory");
return __ready;
}
inline __device__ void cp_async_bulk_tensor_2d(
__mbarrier_t *barrier, void *dst, int access_coord_x, int access_coord_y, const CUtensorMap *tensor_desc)
{
unsigned smem_int_ptr = static_cast<unsigned int>(__cvta_generic_to_shared(dst));
unsigned smem_barrier_int_ptr = static_cast<unsigned int>(__cvta_generic_to_shared(barrier));
uint64_t tensor_desc_ptr = reinterpret_cast<uint64_t>(tensor_desc);
asm volatile(
"cp.async.bulk.tensor.2d.shared::cluster.global.tile.mbarrier::complete_tx::bytes "
"[%0], [%1, {%2, %3}], [%4];\n"
:
: "r"(smem_int_ptr),
"l"(tensor_desc_ptr),
"r"(access_coord_x),
"r"(access_coord_y),
"r"(smem_barrier_int_ptr)
: "memory");
}
// Layout of shared memory. It contains:
//
// - a buffer to hold a subset of a tensor,
// - a shared memory barrier.
template <int H, int W>
struct smem_t {
// The destination shared memory buffer of a bulk tensor operation should be
// 128 byte aligned.
struct alignas(128) tensor_buffer {
int data[H][W];
__device__ constexpr int width() {return W;}
__device__ constexpr int height() {return H;}
};
tensor_buffer buffer;
// Put the barrier behind the tensor buffer to prevent 100+ bytes of padding.
__mbarrier_t bar;
__device__ constexpr int buffer_size_in_bytes() {
return sizeof(tensor_buffer::data);
}
};
/*
* Main kernel: takes a TMA descriptor and two coordinates.
*
* Loads a tile into shared memory using TMA and prints the tile.
*
*/
__global__ void kernel(const __grid_constant__ CUtensorMap tma_desc, int x_0, int y_0) {
/*
* ***NOTE***:
A CUtensorMap can only be passed as a `const __grid_constant__`
parameter. Passing a CUtensorMap in any other way from the host to
device can result in difficult if not impossible to debug failures.
*/
// Declare shared memory to hold tensor buffer and shared memory barrier.
__shared__ smem_t<SMEM_H, SMEM_W> smem;
// Utility variable to elect a leader thread.
bool leader = threadIdx.x == 0;
if (leader) {
// Initialize barrier. We will participate in the barrier with `blockDim.x`
// threads.
__mbarrier_init(&smem.bar, blockDim.x);
}
// Syncthreads so initialized barrier is visible to all threads.
__syncthreads();
// This token is created when arriving on the shared memory barrier. It is
// used again when waiting on the barrier.
__mbarrier_token_t token;
// Load first batch
if (leader) {
// Initiate bulk tensor copy.
cp_async_bulk_tensor_2d(&smem.bar, &smem.buffer.data, x_0, y_0, &tma_desc);
// Arrive with arrival count of 1 and expected transaction count equal to
// the number of bytes that are copied by cp_async_bulk_tensor_2d.
token = barrier_arrive1_tx(&smem.bar, smem.buffer_size_in_bytes());
} else {
// Other threads arrive with arrival count of 1 and expected tx count of 0.
token = barrier_arrive1_tx(&smem.bar, 0);
}
// The barrier will flip when the following two conditions have been met:
//
// - Its arrival count reaches blockDim.x (see __mbarrier_init above).
// Typically, each thread will arrive with an arrival count of one so this
// indicates that all threads have arrived.
//
// - Its expected transaction count reaches smem.buffer_size_in_bytes(). The
// bulk tensor operation will increment the transaction count as it copies
// bytes.
// Wait for barrier to flip. Try_wait puts the thread to sleep while waiting.
// It is woken up when the barrier flips or when a hardware-defined number of
// clock cycles have passed. In the second case, we retry waiting.
while(! barrier_try_wait_token(&smem.bar, token)) { };
// From this point onwards, the data in smem.buffer is readable by all threads
// participating in the barrier.
// Print the data:
if (leader) {
printf("\n\nPrinting tile at coordinates x0 = %d, y0 = %d\n", x_0, y_0);
// Print global x coordinates
printf("global->\t");
for (int x = 0; x < smem.buffer.width(); ++x) {
printf("[%4d] ", x_0 + x);
}
printf("\n");
// Print local x coordinates
printf("local ->\t");
for (int x = 0; x < smem.buffer.width(); ++x) {
printf("[%4d] ", x);
}
printf("\n");
for (int y = 0; y < smem.buffer.height(); ++y) {
// Print global and local y coordinates
printf("[%4d] [%2d]\t", y_0 + y, y);
for (int x = 0; x < smem.buffer.width(); ++x) {
printf(" %4d ", smem.buffer.data[y][x]);
}
printf("\n");
}
// Invalidate barrier. If further computations were to take place in the
// kernel, this allows the memory location of the shared memory barrier to
// be repurposed.
__mbarrier_inval(&smem.bar);
}
}
int main(int argc, char **argv) {
// Create a 2D tensor in GPU global memory containing linear indices 0, 1, 2, ... .
// The data layout is row-major.
// First fill in a vector on the host.
std::vector<int> tensor_host(H_global * W_global);
for (int i = 0; i < H_global * W_global; ++i) {
tensor_host[i] = i;
}
// Move it to device
int * tensor = nullptr;
CUDA_CHECK(cudaMalloc(&tensor, H_global * W_global * sizeof(int)));
CUDA_CHECK(cudaMemcpy(tensor, tensor_host.data(), H_global * W_global * sizeof(int), cudaMemcpyHostToDevice));
// Set up parameters to create TMA descriptor.
// https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TENSOR__MEMORY.html
CUtensorMap tma_desc{};
CUtensorMapDataType dtype = CUtensorMapDataType::CU_TENSOR_MAP_DATA_TYPE_INT32;
auto rank = 2;
uint64_t size[rank] = {W_global, H_global};
// The stride is the number of bytes to traverse from the first element of one row to the next.
// It must be a multiple of 16.
uint64_t stride[rank - 1] = {W_global * sizeof(int)};
// The box_size is the size of the shared memory buffer that is used as the destination of a TMA transfer.
uint32_t box_size[rank] = {SMEM_W, SMEM_H};
// The distance between elements in units of sizeof(element). A stride of 2
// can be used to load only the real component of a complex-valued tensor, for instance.
uint32_t elem_stride[rank] = {1, 1};
// Interleave patterns are sometimes used to accelerate loading of values that
// are less than 4 bytes long.
CUtensorMapInterleave interleave = CUtensorMapInterleave::CU_TENSOR_MAP_INTERLEAVE_NONE;
// Swizzling can be used to avoid shared memory bank conflicts.
CUtensorMapSwizzle swizzle = CUtensorMapSwizzle::CU_TENSOR_MAP_SWIZZLE_NONE;
CUtensorMapL2promotion l2_promotion = CUtensorMapL2promotion::CU_TENSOR_MAP_L2_PROMOTION_NONE;
// Any element that is outside of bounds will be set to zero by the TMA transfer.
CUtensorMapFloatOOBfill oob_fill = CUtensorMapFloatOOBfill::CU_TENSOR_MAP_FLOAT_OOB_FILL_NONE;
// Get a function pointer to the cuTensorMapEncodeTiled driver API.
auto cuTensorMapEncodeTiled = get_cuTensorMapEncodeTiled();
// Create the tensor descriptor.
CUresult res = cuTensorMapEncodeTiled(
&tma_desc, // CUtensorMap *tensorMap,
dtype, // CUtensorMapDataType tensorDataType,
rank, // cuuint32_t tensorRank,
tensor, // void *globalAddress,
size, // const cuuint64_t *globalDim,
stride, // const cuuint64_t *globalStrides,
box_size, // const cuuint32_t *boxDim,
elem_stride, // const cuuint32_t *elementStrides,
interleave, // CUtensorMapInterleave interleave,
swizzle, // CUtensorMapSwizzle swizzle,
l2_promotion, // CUtensorMapL2promotion l2Promotion,
oob_fill // CUtensorMapFloatOOBfill oobFill);
);
// Print the result. Should be zero.
printf("cuTensorMapEncodeTiled returned CUresult: %d\n\n", res);
CUDA_CHECK(cudaDeviceSynchronize());
dim3 grid(1);
dim3 block(128);
printf("Print the top right corner tile of the tensor:\n");
kernel<<<grid, block>>>(tma_desc, 0, 0);
CUDA_CHECK(cudaDeviceSynchronize());
printf("Negative indices work:\n");
kernel<<<grid, block>>>(tma_desc, -4, 0);
CUDA_CHECK(cudaDeviceSynchronize());
printf("When the indices are out of bounds, the shared memory buffer is filled with zeros:\n");
kernel<<<grid, block>>>(tma_desc, W_global, H_global);
CUDA_CHECK(cudaDeviceSynchronize());
printf(
"\n**NOTE**: The following code will fail.\n "
"\nCare must be taken to ensure that the coordinates result in a memory offset\n"
"that is aligned to 16 bytes. With 32 bit integer elements, x coordinates\n"
"that are not a multiple of 4 result in a non-recoverable error:\n\n"
);
kernel<<<grid, block>>>(tma_desc, 1, 0);
CUDA_REPORT(cudaDeviceSynchronize());
kernel<<<grid, block>>>(tma_desc, 2, 0);
CUDA_REPORT(cudaDeviceSynchronize());
kernel<<<grid, block>>>(tma_desc, 3, 0);
CUDA_REPORT(cudaDeviceSynchronize());
CUDA_REPORT(cudaFree(tensor));
return 0;
}

View File

@@ -0,0 +1,64 @@
/* Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
// Macro for checking cuda errors following a cuda launch or api call
#define CUDA_CHECK(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA error: %s %s %d\n", cudaGetErrorString(code), file,
line);
exit(code);
}
}
// Macro to report cuda errors following a cuda launch or api call
#define CUDA_REPORT(ans) \
{ \
gpuReport((ans), __FILE__, __LINE__); \
}
inline void gpuReport(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess)
{
fprintf(stderr, "CUDA error (as expected): %s %s %d\n", cudaGetErrorString(code), file,
line);
}
}

View File

@@ -493,12 +493,14 @@ static void parentProcess(char *app) {
continue;
}
for (int j = 0; j < nprocesses; j++) {
for (int j = 0; j < selectedDevices.size(); j++) {
int canAccessPeerIJ, canAccessPeerJI;
checkCudaErrors(
cuDeviceCanAccessPeer(&canAccessPeerJI, devices[j], devices[i]));
checkCudaErrors(
cuDeviceCanAccessPeer(&canAccessPeerIJ, devices[i], devices[j]));
checkCudaErrors(cuDeviceCanAccessPeer(&canAccessPeerJI,
devices[selectedDevices[j]],
devices[i]));
checkCudaErrors(cuDeviceCanAccessPeer(&canAccessPeerIJ,
devices[i],
devices[selectedDevices[j]]));
if (!canAccessPeerIJ || !canAccessPeerJI) {
allPeers = false;
break;
@@ -513,10 +515,10 @@
// setup the peers for the device. For systems that only allow 8
// peers per GPU at a time, this acts to remove devices from CanAccessPeer
for (int j = 0; j < nprocesses; j++) {
checkCudaErrors(cuCtxSetCurrent(ctxs[i]));
checkCudaErrors(cuCtxSetCurrent(ctxs.back()));
checkCudaErrors(cuCtxEnablePeerAccess(ctxs[j], 0));
checkCudaErrors(cuCtxSetCurrent(ctxs[j]));
checkCudaErrors(cuCtxEnablePeerAccess(ctxs[i], 0));
checkCudaErrors(cuCtxEnablePeerAccess(ctxs.back(), 0));
}
selectedDevices.push_back(i);
nprocesses++;
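The hunk above addresses issue #321: the peer-access check must run against the devices already accepted into selectedDevices, not against the first nprocesses entries of devices. A standalone sketch of that selection pattern using the driver API (function and variable names here are illustrative, not taken from the sample; assumes cuInit(0) has been called):

#include <cuda.h>
#include <vector>

// Greedily collect devices that all have bidirectional peer access with one
// another. Each candidate is tested against the devices selected so far.
std::vector<CUdevice> selectMutualPeers(const std::vector<CUdevice> &devices) {
  std::vector<CUdevice> selected;
  for (CUdevice cand : devices) {
    bool allPeers = true;
    for (CUdevice sel : selected) {
      int candToSel = 0, selToCand = 0;
      cuDeviceCanAccessPeer(&candToSel, cand, sel);
      cuDeviceCanAccessPeer(&selToCand, sel, cand);
      if (!candToSel || !selToCand) {
        allPeers = false;
        break;
      }
    }
    if (allPeers) selected.push_back(cand);
  }
  return selected;
}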

View File

@@ -231,6 +231,10 @@ int main(int argc, char **argv) {
}
}
if (buffer) {
checkCudaErrors(cudaFree(buffer));
}
cusparseDestroy(cusparseHandle);
cublasDestroy(cublasHandle);
if (matA) {

View File

@@ -2,7 +2,7 @@
## Description
This sample demonstrates CUDA-NvSciBuf/NvSciSync Interop. Two CPU threads import the NvSciBuf and NvSciSync into CUDA to perform two image processing algorithms on a ppm image - image rotation in 1st thread &amp;amp;amp;amp;amp;amp;amp;amp;amp;amp;amp;amp;amp; rgba to grayscale conversion of rotated image in 2nd thread. Currently only supported on Ubuntu 18.04
This sample demonstrates CUDA-NvSciBuf/NvSciSync Interop. Two CPU threads import the NvSciBuf and NvSciSync into CUDA to perform two image processing algorithms on a ppm image - image rotation in 1st thread &amp; rgba to grayscale conversion of rotated image in 2nd thread. Currently only supported on Ubuntu 18.04
## Key Concepts

View File

@@ -53,7 +53,7 @@ const char *sSDKsample = "Transpose";
// TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of
// BLOCK_ROWS
#define TILE_DIM 16
#define TILE_DIM 32
#define BLOCK_ROWS 16
// This sample assumes that MATRIX_SIZE_X = MATRIX_SIZE_Y
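For background on the TILE_DIM change above: shared memory on current GPUs is organized into 32 four-byte banks, and a 32-element tile width combined with the usual +1 column padding makes the column-wise reads of a transpose conflict-free. A minimal illustrative kernel using that padded-tile idiom (a sketch of the standard pattern, not the sample's exact code; width and height are assumed to be multiples of TILE_DIM):

#define TILE_DIM 32
#define BLOCK_ROWS 8

// Tiled matrix transpose, launched with blockDim = (TILE_DIM, BLOCK_ROWS).
// The +1 padding shifts each row of the tile by one bank, so reading a
// column (tile[threadIdx.x][...]) touches 32 different banks.
__global__ void transposeTiled(float *odata, const float *idata, int width,
                               int height) {
  __shared__ float tile[TILE_DIM][TILE_DIM + 1];

  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
    tile[threadIdx.y + i][threadIdx.x] = idata[(y + i) * width + x];

  __syncthreads();

  // Write the transposed tile back out; block indices are swapped so the
  // global writes stay coalesced.
  x = blockIdx.y * TILE_DIM + threadIdx.x;
  y = blockIdx.x * TILE_DIM + threadIdx.y;
  for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)
    odata[(y + i) * height + x] = tile[threadIdx.x][threadIdx.y + i];
}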