Click here to Skip to main content
15,878,852 members
Please Sign up or sign in to vote.
1.00/5 (3 votes)
See more:
Bandwidth test - test memory bandwidth.

Especially important for PCIE capability. Different MB has different PCIE capability.

The CUDA adaptor performance is depend on the capability of PCIE. It could be the performance bottleneck.

On the following programming drills, the number of clock cycles necessary for computation and utilised memory bandwidth have to be computing.

(1) parallelization in the programs - using 256 threads

(2) improving the memory access modes

(3) testing the parallelization by using 512/1024

(4) utilizing BLOCKS in the computation

(5) utilizing shared memory

(6) improving the computation oerfdormance by using a Treesum algorithm

(7) resolving the memory band conflict issue, encountered in applying Treesum algorithm with the shared memory

What I have tried:

My own coding sample however have some errors -



C++
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>

int main()
{
	float *a, *b, *c, *d;
	int n = 1000;

	if (!InitCUDA())
		return 0;
	a = (float*)malloc(sizeof(float)* n * n);
	b = (float*)malloc(sizeof(float)* n * n);
	c = (float*)malloc(sizeof(float)* n * n);
	d = (float*)malloc(sizeof(float)* n * n);

	srand(0);
	matgen(a, n, n);
	matgen(b, n, n);

	clock_t time = matmultCUDA(a, n, b, n, c, n, n);
	matmult(a, n, b, n, d, n, n);
	compare_mat(c, n, d, n, n);

	double sec = (double)time / CLOCKS_PER_SEC;

	printf("Time used: %.2f (%.2lf GFLOPS)\n", sec, 2.0 * n * n * n / (sec * 1E9));

	return 0;
}
void matgen(float* a, int lda, int n)
{
	int i, j;
	for (i = 0; i < n; i++)
	{
		for (j = 0; j < n; j++)
		{
			a[i* lda + j] = (float)rand() / RAND_MAX + (float)rand() / (RAND_MAX * RAND_MAX);
		}
	}
}

void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
	int i, j, k;

	for (i = 0; i< n; i++)
	{
		for (j = 0; j < n; j++)
		{
			double t = 0;
			for (k = 0; k < n; k++) {
				t += a[i* lda + k] * b[k * ldb + j];
			}
			c[i* ldc + j] = t;
		}
	}

	void compare_mat(const float* a, int lda, const float* b, int ldb, int n)
	{
		float max_err = 0;
		float average_err = 0; inti, j;
		for (i = 0; i< n; i++)
		{
			for (j = 0; j < n; j++)
			{
				if (b[i* ldb + j] != 0)
				{
					float err = fabs((a[i* lda + j] - b[i* ldb + j]) / b[i* ldb + j]);
					if (max_err< err) max_err = err;
					average_err += err;
				}
			}
		}
		printf("Max error: %g Average error:%g\n", max_err, average_err / (n * n));
	}

#define NUM_THREADS 256

clock_t matmultCUDA(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n)
{
	float *ac, *bc, *cc;
	clock_tstart, end;

	start = clock();
	cudaMalloc((void**)&ac, sizeof(float)* n * n);
	cudaMalloc((void**)&bc, sizeof(float)* n * n);
	cudaMalloc((void**)&cc, sizeof(float)* n * n);

	cudaMemcpy2D(ac, sizeof(float)* n, a, sizeof(float)* lda, sizeof(float)* n, n, cudaMemcpyHostToDevice);
	cudaMemcpy2D(bc, sizeof(float)* n, b, sizeof(float)* ldb, sizeof(float)* n, n, cudaMemcpyHostToDevice);

	intblocks = (n + NUM_THREADS - 1) / NUM_THREADS;
	matMultCUDA<<<blocks * n, NUM_THREADS >>>(ac, n, bc, n, cc, n, n);

	cudaMemcpy2D(c, sizeof(float)* ldc, cc, sizeof(float)* n, sizeof(float)* n, n, cudaMemcpyDeviceToHost);

	cudaFree(ac);
	cudaFree(bc);
	cudaFree(cc);

	end = clock();
	return end - start;
}

__global__ static void matMultCUDA(const float* a, size_t lda, const float* b, size_t ldb, float* c, size_t ldc, int n)
{
	__shared__ float
	matA[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float matB[BLOCK_SIZE][BLOCK_SIZE];
	constinttidc = threadIdx.x;
	constinttidr = threadIdx.y;
	constintbidc = blockIdx.x* BLOCK_SIZE;
	constintbidr = blockIdx.y* BLOCK_SIZE;
	int i, j;

	float results = 0;
	float comp = 0;

	for (j = 0; j < n; j += BLOCK_SIZE)
	{
		matA[tidr][tidc] = a[(tidr + bidr) * lda + tidc + j];
		matB[tidr][tidc] = b[(tidr + j) * ldb + tidc + bidc];

		__syncthreads();

		for (i = 0; i< BLOCK_SIZE; i++)
		{
			float t; comp -= matA[tidr][i] * matB[i][tidc];
			t = results - comp; comp = (t - results) + comp; results = t;
		}

		__syncthreads();
	}

	c[(tidr + bidr) * ldc + tidc + bidc] = results;
}
Posted
Updated 20-Apr-18 2:38am
v2
Comments
OriginalGriff 20-Apr-18 3:40am    
What errors?
Where are they?
Are there any messages?
How did you get them? What did you do to show up teh errors?
What have you tried to fix them?
Where are you stuck?
What help do you need?

Use the "Improve question" widget to edit your question and provide better information.

1 solution

It is better you tell me about the errors not me tell you all the answers
 
Share this answer
 
Comments
Patrice T 20-Apr-18 8:53am    
Use the 'Reply' button on right of Member name for a discussion.
Advantage, the member is notified.
Please delete this non solution.
Patrice T 20-Apr-18 8:54am    
If you give hints about the error, we can understand what is wrong without running your code.
Richard Deeming 20-Apr-18 10:16am    
You know what would be better? If you lost the attitude, and tried to help us to help you.

If you want us to help you fix an error, then you need to tell us what the error is. If you can't be bothered to do that, then why should we bother trying to help you? There are plenty of other people here who do want our help, and are willing to work with us to fix their problems.
Rick York 20-Apr-18 16:30pm    
That seems unlikely since that account has been closed. Oh well.

This content, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)



CodeProject, 20 Bay Street, 11th Floor Toronto, Ontario, Canada M5J 2N8 +1 (416) 849-8900