How can I make a GPU process much faster than a CPU process with CUDA 10.0 in Visual Studio 2017?



























Smart developers!
I am a beginner at CUDA programming and I have a big problem with my code.

The following code is a sample from Nvidia that I changed a little to show the GPU process being much faster than the CPU process. However, after compiling and running this code, I got an unexpected result: the CPU process is much faster than the GPU process.



This is my laptop's GPU info.



This is my CUDA code for Visual Studio 2017.



===========================================================================



#define N 10



This is the add2() function, run as the GPU process:



__global__ void add2(int *a, int *b, int *c) {
    // GPU block index from the grid
    //int tid = blockIdx.x; // checking the index data: with a small N this is slower than the CPU, but with a big number it is much faster than the CPU

    // GPU thread index
    //int tid = threadIdx.x; // Same result as blockIdx.x

    // GPU global index across the whole grid // Same result as above
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N) {
        c[tid] = a[tid] + b[tid];
    }
}


This is the add() function, run as the CPU process:



void add(int *a, int *b, int *c) {
    int tid = 0;
    while (tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += 1;
    }
}


This is the main() function:



int main() {
    // Values for time measurement
    LARGE_INTEGER tFreq, tStart, tEnd;
    cudaEvent_t start, stop;
    float tms, ms;

    int a[N], b[N], c[N];       // CPU values
    int *dev_a, *dev_b, *dev_c; // GPU values

    // Allocate GPU memory
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Fill 'a' and 'b' on the CPU
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    // Copy CPU values to GPU memory
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    //////////////////////////////////////
    QueryPerformanceFrequency(&tFreq); // Frequency set
    QueryPerformanceCounter(&tStart);  // Time count start

    // CPU operation
    add(a, b, c);

    //////////////////////////////////////
    QueryPerformanceCounter(&tEnd);    // Time count end
    tms = ((tEnd.QuadPart - tStart.QuadPart) / (float)tFreq.QuadPart) * 1000;
    //////////////////////////////////////

    // Show result of CPU
    cout << fixed;
    cout.precision(10);
    cout << "CPU Time=" << tms << endl << endl;

    for (int i = 0; i < N; i++) {
        printf("CPU calculate = %d + %d = %d\n", a[i], b[i], c[i]);
    }

    cout << endl;

    ///////////////////////////////////////
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // GPU operation
    //add2<<<N, 1>>>(dev_a, dev_b, dev_c); // one block per element
    //add2<<<1, N>>>(dev_a, dev_b, dev_c); // one thread per element
    add2<<<N / 32 + 1, 32>>>(dev_a, dev_b, dev_c); // grid of 32-thread blocks

    ///////////////////////////////////////
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    ///////////////////////////////////////

    // Show result of GPU
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    cout << fixed;
    cout.precision(10);
    cout << "GPU Time=" << ms << endl << endl;

    for (int i = 0; i < N; i++) {
        printf("GPU calculate = %d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Free GPU memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    return 0;
}


This is the result of running this code.



I want to make the GPU process much faster than the CPU process.










Tags: c++ cuda gpu nvidia gpgpu






asked Nov 19 '18 at 9:33 by L SJin, edited Nov 19 '18 at 9:56 by Mike








  • OP has acknowledged in cross posting here that they can get a different comparison between CPU and GPU by making some modifications to their code. – Robert Crovella, Nov 19 '18 at 15:33
1 Answer
































The GPU is generally actually slower than the CPU for running a single operation. Additionally, it takes time to send data to the GPU and read it back again.

The advantage of the GPU is that it can execute many operations in parallel.

As you have defined N to be 10, it probably takes longer to upload and download the data than to execute the addition on the CPU. To see the advantage of the GPU, increase your problem size to something much larger. Ideally you want to execute at least a few operations on each GPU core before you start seeing any benefit; for example, with your GPU's 1280 cores you would want to execute something like 4000 operations or more at once to get the benefit of the GPU.

– Alan Birtles, answered Nov 19 '18 at 11:13
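To make that advice concrete, here is a minimal sketch (my own illustration, not the answerer's or the asker's exact code) of the same vector addition with a much larger, heap-allocated problem size. The size BIG_N = 1 << 24 and the block size of 256 are arbitrary choices for demonstration, and only the kernel itself is timed so that the host-device copies stay outside the measured region.

#include <cstdio>
#include <chrono>
#include <cuda_runtime.h>

#define BIG_N (1 << 24)   // ~16.7 million elements instead of 10 (arbitrary demo size)

__global__ void add2(const int *a, const int *b, int *c, int n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n) c[tid] = a[tid] + b[tid];
}

void add(const int *a, const int *b, int *c, int n) {
    for (int i = 0; i < n; i++) c[i] = a[i] + b[i];
}

int main() {
    // Heap allocation: BIG_N ints no longer fit on the stack
    int *a = new int[BIG_N], *b = new int[BIG_N], *c = new int[BIG_N];
    for (int i = 0; i < BIG_N; i++) { a[i] = -i; b[i] = i * i; }

    int *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, BIG_N * sizeof(int));
    cudaMalloc((void**)&dev_b, BIG_N * sizeof(int));
    cudaMalloc((void**)&dev_c, BIG_N * sizeof(int));
    cudaMemcpy(dev_a, a, BIG_N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, BIG_N * sizeof(int), cudaMemcpyHostToDevice);

    // Time the single-threaded CPU loop
    auto t0 = std::chrono::high_resolution_clock::now();
    add(a, b, c, BIG_N);
    auto t1 = std::chrono::high_resolution_clock::now();
    float cpu_ms = std::chrono::duration<float, std::milli>(t1 - t0).count();

    // Time the kernel only; the copies above are deliberately excluded
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    add2<<<(BIG_N + 255) / 256, 256>>>(dev_a, dev_b, dev_c, BIG_N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float gpu_ms;
    cudaEventElapsedTime(&gpu_ms, start, stop);

    printf("CPU Time = %f ms\n", cpu_ms);
    printf("GPU Time = %f ms (kernel only)\n", gpu_ms);

    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    delete[] a; delete[] b; delete[] c;
    return 0;
}

At a size like this the kernel time would normally come out well below the single-threaded CPU loop, while with N = 10 the kernel launch overhead alone dominates; the exact numbers depend on the hardware and build settings.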






  • You mean 400 000, not 4000. 4000 would likely take the same time as 10... – talonmies, Nov 19 '18 at 11:20

  • @talonmies It would take the same time as 10 to execute on the GPU, but should take longer on the CPU. I agree that with GPU processing the more the merrier, but I was giving an approximate minimum task size to start seeing some benefit. – Alan Birtles, Nov 19 '18 at 11:42

  • @AlanBirtles Thank you so much for helping me understand what I didn't know. By the way, I want to ask something else: can I figure out the minimum number of operations at which the GPU becomes faster than the CPU? (This question is just my curiosity about CUDA programming. Feel free to answer or ignore it.) – L SJin, Nov 20 '18 at 5:57

  • The only way to figure it out is by experimentation; it depends on which operations you are performing, how much data you are using, whether your compiler optimises your CPU code to use SIMD instructions, and of course how fast your CPU and GPU are. – Alan Birtles, Nov 20 '18 at 7:33

  • @AlanBirtles OK, I will do that! And thank you again for giving me advice! – L SJin, Nov 21 '18 at 4:32
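One rough way to act on the experimentation advice in the comments above is to sweep the problem size and print both timings, then read off where the GPU kernel starts winning on a particular machine. The sketch below is my own illustration (the sizes and the 256-thread block size are arbitrary assumptions), following the same kernel-only timing pattern as the earlier example; for a fair comparison the CPU side should also be built with optimizations enabled (for example a Release build with /O2 in Visual Studio), since a Debug build makes the CPU look artificially slow.

#include <cstdio>
#include <vector>
#include <chrono>
#include <cuda_runtime.h>

// Element-wise add used for the GPU side of the sweep
__global__ void addKernel(const int *a, const int *b, int *c, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main() {
    const int sizes[] = { 1 << 10, 1 << 14, 1 << 18, 1 << 22 }; // arbitrary sweep points
    for (int n : sizes) {
        std::vector<int> a(n), b(n), c(n);
        for (int i = 0; i < n; i++) { a[i] = -i; b[i] = i * i; }

        // CPU timing: plain single-threaded loop
        auto t0 = std::chrono::high_resolution_clock::now();
        for (int i = 0; i < n; i++) c[i] = a[i] + b[i];
        auto t1 = std::chrono::high_resolution_clock::now();
        float cpu_ms = std::chrono::duration<float, std::milli>(t1 - t0).count();

        // GPU timing: kernel only, copies excluded
        int *da, *db, *dc;
        cudaMalloc((void**)&da, n * sizeof(int));
        cudaMalloc((void**)&db, n * sizeof(int));
        cudaMalloc((void**)&dc, n * sizeof(int));
        cudaMemcpy(da, a.data(), n * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(db, b.data(), n * sizeof(int), cudaMemcpyHostToDevice);

        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start);
        addKernel<<<(n + 255) / 256, 256>>>(da, db, dc, n);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float gpu_ms;
        cudaEventElapsedTime(&gpu_ms, start, stop);

        printf("n = %8d   CPU %.4f ms   GPU kernel %.4f ms\n", n, cpu_ms, gpu_ms);

        cudaEventDestroy(start); cudaEventDestroy(stop);
        cudaFree(da); cudaFree(db); cudaFree(dc);
    }
    return 0;
}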












