@@ -214,6 +214,11 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_
 static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
 #endif
 
+struct ggml_tensor_extra_gpu {
+    void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
+    cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
+};
+
 static __global__ void add_f32(const float * x, const float * y, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
 
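
The new per-tensor extra pairs a device pointer with a CUDA event for every device. As a self-contained sketch of how such a struct is typically created (the make_extra helper is hypothetical, and the GGML_CUDA_MAX_DEVICES value is an assumption for the sketch): value-initialization zeroes both arrays, and since cudaEvent_t is a pointer type, a later null check reliably means "no event was created for this slot". The teardown hunk at the end of this diff depends on exactly that.

    #include <cuda_runtime.h>

    #define GGML_CUDA_MAX_DEVICES 16 // assumption for this sketch

    struct ggml_tensor_extra_gpu {
        void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
        cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
    };

    // hypothetical helper: value-initialization ({}) zeroes both arrays,
    // so data_device[id] and events[id] start out as nullptr
    static ggml_tensor_extra_gpu * make_extra() {
        return new ggml_tensor_extra_gpu{};
    }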
@@ -1970,7 +1975,6 @@ inline void ggml_cuda_op_add(
     } else {
         GGML_ASSERT(false);
     }
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2002,7 +2006,6 @@ inline void ggml_cuda_op_mul(
 
         // compute
         mul_f32_cuda(src0_ddf_i01, src1_ddf_i01, dst_ddf_i01, ne00, ne10, cudaStream_main);
-        CUDA_CHECK(cudaGetLastError());
     }
 
     (void) dst;
@@ -2023,7 +2026,6 @@ inline void ggml_cuda_op_silu(
 
     // compute
     silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2046,7 +2048,6 @@ inline void ggml_cuda_op_rms_norm(
 
     // compute
     rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2125,7 +2126,6 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec(
             GGML_ASSERT(false);
             break;
     }
-    CUDA_CHECK(cudaGetLastError());
 
 #ifdef GGML_CUDA_DMMV_F16
     if (src1_convert_f16) {
@@ -2202,7 +2202,6 @@ inline void ggml_cuda_op_rope(
 
     // compute
     rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p, theta_scale, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) dst;
     (void) src0_ddq_i;
@@ -2226,7 +2225,6 @@ inline void ggml_cuda_op_diag_mask_inf(
 
     // compute
     diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_past, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) dst;
     (void) src0_ddq_i;
@@ -2248,7 +2246,6 @@ inline void ggml_cuda_op_soft_max(
 
     // compute
     soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
-    CUDA_CHECK(cudaGetLastError());
 
     (void) src1;
     (void) dst;
@@ -2344,10 +2341,11 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
     size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
     size_t  dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
 
-    // if multiple GPUs are used they need to wait for the main GPU to finish
+    // if multiple devices are used they need to wait for the main device
+    // here an event is recorded that signifies that the main device has finished calculating the input data
     if (split && g_device_count > 1) {
         CUDA_CHECK(cudaSetDevice(g_main_device));
-        CUDA_CHECK(cudaDeviceSynchronize());
+        CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
     }
 
     for (int id = 0; id < g_device_count; ++id) {
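
This is the core of the change: instead of a host-blocking cudaDeviceSynchronize() on the main device, an event is recorded on the main stream, and each secondary device makes its own stream wait on that event (next hunk). A minimal sketch of the record/wait idiom in isolation, with placeholder stream names; CUDA_CHECK is the file's existing error-checking macro:

    static void order_across_streams(cudaStream_t main_stream, cudaStream_t other_stream) {
        cudaEvent_t input_ready;
        CUDA_CHECK(cudaEventCreateWithFlags(&input_ready, cudaEventDisableTiming));

        // producer: mark "input computed" at this point in main_stream
        CUDA_CHECK(cudaEventRecord(input_ready, main_stream));

        // consumer: other_stream pauses here until the event fires;
        // the host thread itself never blocks
        CUDA_CHECK(cudaStreamWaitEvent(other_stream, input_ready, 0));

        // safe even while other_stream still waits: destruction is deferred
        CUDA_CHECK(cudaEventDestroy(input_ready));
    }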
@@ -2373,6 +2371,12 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
         int64_t row_diff = row_high - row_low;
 
         cudaSetDevice(id);
+        cudaStream_t cudaStream_main = g_cudaStreams_main[id];
+
+        // wait for main GPU data if necessary
+        if (split && id != g_main_device) {
+            CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
+        }
 
         if (src0_on_device && src0_is_contiguous) {
             if (src0_is_f32) {
@@ -2448,8 +2452,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
                 }
                 const int64_t i11 = i13*ne12 + i12;
 
-                cudaStream_t cudaStream_main = g_cudaStreams_main[id];
-
                 // for split tensors the data begins at i0 == i0_offset_low
                 char  * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
                 float * src0_ddf_i = src0_ddf[id] + (i0 - i0_offset_low)*src0_stride;
@@ -2509,6 +2511,7 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
 
                 // do the computation
                 op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
+                CUDA_CHECK(cudaGetLastError());
 
                 // copy dst to host or other device if necessary
                 if (!dst_on_device) {
@@ -2538,6 +2541,11 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
                         CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
                     }
                 }
+
+                // signify to main device that other device is done
+                if (split && g_device_count > 1 && id != g_main_device) {
+                    CUDA_CHECK(cudaEventRecord(src0_extra->events[id], cudaStream_main));
+                }
             }
         }
     }
@@ -2549,7 +2557,6 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
         }
 
         CUDA_CHECK(cudaSetDevice(id));
-        CUDA_CHECK(cudaDeviceSynchronize());
 
         if (src0_asq[id] > 0) {
             ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
@@ -2564,6 +2571,21 @@ static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggm
             ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
         }
     }
+
+    // main device waits for all other devices to be finished
+    if (split && g_device_count > 1) {
+        CUDA_CHECK(cudaSetDevice(g_main_device));
+        for (int id = 0; id < g_device_count; ++id) {
+            if (id != g_main_device) {
+                CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
+            }
+        }
+    }
+
+    if (dst->backend == GGML_BACKEND_CPU) {
+        CUDA_CHECK(cudaSetDevice(g_main_device));
+        CUDA_CHECK(cudaDeviceSynchronize());
+    }
 }
 
 void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
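
Taken together, ggml_cuda_op now orders the devices entirely with events and blocks the host only when the CPU actually consumes the result. Condensed, with the per-device loops omitted and main, streams, and extra standing in for g_main_device, g_cudaStreams_main, and src0_extra:

    cudaEventRecord(extra->events[main], streams[main]);      // 1. main: input ready
    cudaStreamWaitEvent(streams[id], extra->events[main], 0); // 2. others: wait for input
    // ... every device runs op(...) on its row slice ...
    cudaEventRecord(extra->events[id], streams[id]);          // 3. others: slice finished
    cudaStreamWaitEvent(streams[main], extra->events[id], 0); // 4. main: wait for all slices
    if (dst->backend == GGML_BACKEND_CPU) {
        cudaDeviceSynchronize();                              // 5. host blocks only here
    }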
@@ -2803,6 +2825,10 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
         cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice);
 
         extra->data_device[id] = buf;
+
+        if (backend == GGML_BACKEND_GPU_SPLIT) {
+            CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming));
+        }
     }
 
     tensor->extra = extra;
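
Two details worth noting: events are created only for GGML_BACKEND_GPU_SPLIT tensors, since single-GPU tensors never need cross-device ordering, and they are created with cudaEventDisableTiming because these events exist purely for ordering. In short:

    cudaEvent_t ev;
    CUDA_CHECK(cudaEventCreateWithFlags(&ev, cudaEventDisableTiming)); // sync-only, lowest overhead
    // versus cudaEventCreate(&ev), which also captures timestamps on every record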
@@ -2816,12 +2842,15 @@ void ggml_cuda_free_data(struct ggml_tensor * tensor) {
     ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
 
     for (int id = 0; id < g_device_count; ++id) {
-        if (extra->data_device[id] == nullptr) {
-            continue;
+        if (extra->data_device[id] != nullptr) {
+            CUDA_CHECK(cudaSetDevice(id));
+            CUDA_CHECK(cudaFree(extra->data_device[id]));
         }
 
-        CUDA_CHECK(cudaSetDevice(id));
-        CUDA_CHECK(cudaFree(extra->data_device[id]));
+        if (extra->events[id] != nullptr) {
+            CUDA_CHECK(cudaSetDevice(id));
+            CUDA_CHECK(cudaEventDestroy(extra->events[id]));
+        }
     }
 
     delete extra;