From 7d3332c6b982747819d7673110c1143c18bac6b2 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 08:23:17 +0300 Subject: [PATCH 01/11] CUDA: iq4_k_r4 dequantize --- ggml/src/ggml-cuda.cu | 1 + ggml/src/ggml-cuda/convert.cu | 97 +++++++++++++++++++++++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cu | 38 +++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cuh | 5 ++ 4 files changed, 141 insertions(+) diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu index f55715f13..b5cc39538 100644 --- a/ggml/src/ggml-cuda.cu +++ b/ggml/src/ggml-cuda.cu @@ -3470,6 +3470,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ1_BN: case GGML_TYPE_IQ2_BN: + case GGML_TYPE_IQ4_K_R4: return true; default: return false; diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index 17604f1cf..a616f8c05 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ b/ggml/src/ggml-cuda/convert.cu @@ -754,6 +754,89 @@ static __global__ void dequantize_block_iq4_k(const void * __restrict__ vx, dst_ } } +template +static __global__ void dequantize_block_iq4_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { + + int64_t ii = blockIdx.x; + + //int64_t nblock = n_per_row/256; + //int64_t row = ii/nblock; + //int64_t ibl = ii - row*nblock; + //int64_t row4 = row/4; + //int64_t ir = row4%4; + + //const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)vx + row4*nblock; + + //int64_t row4 = (256*ii)/(4*n_per_row); // rows of 4 index + //int64_t ibl = ii - row4*n_per_row/64; // block index within the rows of 4 + //int64_t ir = row4%4; // row + + //int64_t ibl = ii/4; + //int ir = ii%4; + int64_t nblock = n_per_row/256; + int64_t row = ii/nblock; + int64_t row4 = row/4; + int64_t ir = row%4; + int64_t ibl = row4*nblock + ii%nblock; + // ii = 0 -> row = 0, row4 = 0, ir = 0, ibl should be 0 + // ii = 1 -> row = 0, row4 = 0, ir = 0, ibl should be 1 + // ii = 2 -> row = 0, row4 = 0, ir = 0, ibl should be 2 + // ... + // ii = 16 -> row = 1, row4 = 0, ir = 1, ibl should be 0 + // .. + // ii = 64 -> row = 4, row4 = 1, ir = 0, ibl should be 16 + + + const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)vx; + ////const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)((const char *)vx + 4*row4*row_size); + + // Say, we have rows of 4096, and we have 8 rows -> 4096*8/256 = 128 blocks of 256, 16 blocks per row + // ii = 0 -> ibl = 0, ir = 0 -> warp processes 0...255 in row 0 + // ii = 1 -> ibl = 0, ir = 1 -> warp processes 0...255 in row 1 + // ii = 2 -> ibl = 0, ir = 2 -> warp processes 0...255 in row 2 + // ii = 3 -> ibl = 0, ir = 3 -> warp processes 0...255 in row 3 + // ii = 4 -> ibl = 1, ir = 0 -> warp processes 256...511 in row 0 + // ii = 5 -> ibl = 1, ir = 1 -> warp processes 256...511 in row 1 + // ii = 6 -> ibl = 1, ir = 2 -> warp processes 256...511 in row 2 + // ii = 7 -> ibl = 1, ir = 3 -> warp processes 256...511 in row 3 + // ... 
+ // ii = 63 -> ibl = 15, ir = 3 -> warp processes 3840...4096 in row 3 + // ii = 64 -> ibl = 16, ir = 0 -> warp processes 0...255 in row 4, so offset is 4*4096 = 4*16*256 + const int tid = threadIdx.x; + const int il = tid/8; // 0...3 + const int ib = tid%8; // 0...7 + const float d = __half2float(x[ibl].d[ir]); + int is = 8*ib + ir; + float dl1 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32); + is += 4; + float dl2 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32); + auto values1 = iq4k_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 4); + auto values2 = iq4k_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 4); + dst_t * y = yy + 256*ii + 32*ib; + //dst_t * y = yy + (4*row4 + ir)*n_per_row + ibl*QK_K + 32*ib; + //dst_t * y = yy + ir*n_per_row + 4*ibl*QK_K + 32*ib; + auto qs = x[ibl].qs + 64*ib + 4*ir; + if constexpr (std::is_same_v) { + y[il+ 0] = __float2bfloat16(dl1 * values1[qs[il+ 0] & 0xf]); + y[il+ 8] = __float2bfloat16(dl1 * values1[qs[il+ 0] >> 4]); + y[il+16] = __float2bfloat16(dl2 * values2[qs[il+16] & 0xf]); + y[il+24] = __float2bfloat16(dl2 * values2[qs[il+16] >> 4]); + y[il+ 4] = __float2bfloat16(dl1 * values1[qs[il+32] & 0xf]); + y[il+12] = __float2bfloat16(dl1 * values1[qs[il+32] >> 4]); + y[il+20] = __float2bfloat16(dl2 * values2[qs[il+48] & 0xf]); + y[il+28] = __float2bfloat16(dl2 * values2[qs[il+48] >> 4]); + } else { + y[il+ 0] = dl1 * values1[qs[il+ 0] & 0xf]; + y[il+ 4] = dl1 * values1[qs[il+32] & 0xf]; + y[il+ 8] = dl1 * values1[qs[il+ 0] >> 4]; + y[il+12] = dl1 * values1[qs[il+32] >> 4]; + y[il+16] = dl2 * values2[qs[il+16] & 0xf]; + y[il+20] = dl2 * values2[qs[il+48] & 0xf]; + y[il+24] = dl2 * values2[qs[il+16] >> 4]; + y[il+28] = dl2 * values2[qs[il+48] >> 4]; + } +} + template static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_t * __restrict__ yy) { @@ -1209,6 +1292,14 @@ static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t dequantize_block_iq4_k<<>>(vx, y); } +template +static void dequantize_row_iq4_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { + const int64_t k = nrows * n_per_row; + const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_K, n_per_row); + const int nb = (k + QK_K - 1) / QK_K; + dequantize_block_iq4_k_r4<<>>(vx, y, n_per_row, row_size); +} + template static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { const int64_t k = nrows * n_per_row; @@ -1312,6 +1403,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) { return dequantize_row_iq5_k_cuda; case GGML_TYPE_IQ6_K: return dequantize_row_iq6_k_cuda; + case GGML_TYPE_IQ4_K_R4: + return dequantize_row_iq4_k_r4_cuda; default: return nullptr; } @@ -1394,6 +1487,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { return convert_unary_cuda; case GGML_TYPE_BF16: return convert_from_bf16_cuda; + case GGML_TYPE_IQ4_K_R4: + return dequantize_row_iq4_k_r4_cuda; default: return nullptr; } @@ -1473,6 +1568,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { return convert_unary_cuda; case GGML_TYPE_BF16: return convert_from_bf16_cuda; + case GGML_TYPE_IQ4_K_R4: + return dequantize_row_iq4_k_r4_cuda; default: return nullptr; } diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 6a2db725b..9720fd0fe 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu 
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -229,6 +229,36 @@ __device__ __forceinline__ float vec_dot_iq4_k_q8_1( return d * (sumi1 * ls1 + sumi2 * ls2); } +// TODO +__device__ __forceinline__ float vec_dot_iq4_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { + + return 0.f; + + const block_iq4_k * bq4 = (const block_iq4_k *) vbq + kbx; + const uint8_t * all_values = (const uint8_t *)iq4k_values; + + // iqs is 0...28 + const int ib32 = iqs/4; + // Why iqs/4 ? + const int32_t * q8 = (const int *)bq8_1[ib32].qs; + const uint16_t * q4 = (const uint16_t *)bq4->qs + 8*ib32; + const uint16_t extra = bq4->extra >> 2*ib32; + int v1, v2; + int sumi1 = 0, sumi2 = 0; + for (int j = 0; j < 4; ++j) { + const uint32_t aux32 = q4[2*j+0] | (q4[2*j+1] << 16); + get_int_from_table_16_shift(aux32, extra, all_values, v1, v2); + sumi1 = ggml_cuda_dp4a(v1, q8[j+0], sumi1); + sumi2 = ggml_cuda_dp4a(v2, q8[j+4], sumi2); + } + const float d = __half2float(bq4->d) * __low2float(bq8_1[ib32].ds); + const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2); + const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32; + const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32; + return d * (sumi1 * ls1 + sumi2 * ls2); +} + #define VDR_IQ4_KS_Q8_1_MMVQ 4 #define VDR_IQ4_KS_Q8_1_MMQ 4 @@ -800,6 +830,14 @@ void mul_mat_vec_iq4_k_q8_1_cuda( iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); } +void mul_mat_vec_iq4_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) { + + iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); +} + void mul_mat_vec_iq4_ks_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh index b81d2114a..8de8e3fed 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cuh +++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh @@ -60,3 +60,8 @@ void mul_mat_vec_iq2_bn_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); + +void mul_mat_vec_iq4_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); From 1bbd526a9c131bdbb5894762d90ef4dc29e83394 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 10:17:00 +0300 Subject: [PATCH 02/11] CUDA: iq4_k_r4 GEMV ~10% slower than iq4_k. 
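
The row-interleaved quants (the _r4 types) pack 4 rows into every
block, so a single vec_dot call naturally produces partial sums for 4
rows at once. The scalar vec_dot interface cannot express that, so this
patch switches the vec_dot kernels from returning a float to
accumulating into a caller-provided array, and templates the GEMV
kernel on the number of interleaved rows. A minimal sketch of the
interface change (names as used in the diff below; the parameter name
"result" is only spelled out in the implementations):

    // Before: one row per call, the partial result is returned by value.
    typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq,
        const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs);

    // After: the callee accumulates into result[0..n_interleaved-1],
    // one partial sum per interleaved row.
    typedef void (*vec_dot_q_cuda_t)(const void * __restrict__ vbq,
        const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs,
        float * result);

For n_interleaved == 1 the old behaviour is kept: the caller loops over
rows_per_cuda_block and passes &tmp[j][i]. For the _r4 types a single
call receives tmp[j] and fills all four partial sums, which is why
rows_per_cuda_block becomes n_interleaved.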
--- ggml/src/ggml-cuda/convert.cu | 44 +------ ggml/src/ggml-cuda/iqk_mmvq.cu | 208 ++++++++++++++++++--------------- ggml/src/ggml-cuda/mmvq.cu | 4 + 3 files changed, 125 insertions(+), 131 deletions(-) diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index a616f8c05..a200a2128 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ b/ggml/src/ggml-cuda/convert.cu @@ -759,52 +759,19 @@ static __global__ void dequantize_block_iq4_k_r4(const void * __restrict__ vx, d int64_t ii = blockIdx.x; - //int64_t nblock = n_per_row/256; - //int64_t row = ii/nblock; - //int64_t ibl = ii - row*nblock; - //int64_t row4 = row/4; - //int64_t ir = row4%4; - - //const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)vx + row4*nblock; - - //int64_t row4 = (256*ii)/(4*n_per_row); // rows of 4 index - //int64_t ibl = ii - row4*n_per_row/64; // block index within the rows of 4 - //int64_t ir = row4%4; // row - - //int64_t ibl = ii/4; - //int ir = ii%4; int64_t nblock = n_per_row/256; int64_t row = ii/nblock; int64_t row4 = row/4; int64_t ir = row%4; int64_t ibl = row4*nblock + ii%nblock; - // ii = 0 -> row = 0, row4 = 0, ir = 0, ibl should be 0 - // ii = 1 -> row = 0, row4 = 0, ir = 0, ibl should be 1 - // ii = 2 -> row = 0, row4 = 0, ir = 0, ibl should be 2 - // ... - // ii = 16 -> row = 1, row4 = 0, ir = 1, ibl should be 0 - // .. - // ii = 64 -> row = 4, row4 = 1, ir = 0, ibl should be 16 - - const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)vx; - ////const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)((const char *)vx + 4*row4*row_size); - - // Say, we have rows of 4096, and we have 8 rows -> 4096*8/256 = 128 blocks of 256, 16 blocks per row - // ii = 0 -> ibl = 0, ir = 0 -> warp processes 0...255 in row 0 - // ii = 1 -> ibl = 0, ir = 1 -> warp processes 0...255 in row 1 - // ii = 2 -> ibl = 0, ir = 2 -> warp processes 0...255 in row 2 - // ii = 3 -> ibl = 0, ir = 3 -> warp processes 0...255 in row 3 - // ii = 4 -> ibl = 1, ir = 0 -> warp processes 256...511 in row 0 - // ii = 5 -> ibl = 1, ir = 1 -> warp processes 256...511 in row 1 - // ii = 6 -> ibl = 1, ir = 2 -> warp processes 256...511 in row 2 - // ii = 7 -> ibl = 1, ir = 3 -> warp processes 256...511 in row 3 - // ... 
- // ii = 63 -> ibl = 15, ir = 3 -> warp processes 3840...4096 in row 3 - // ii = 64 -> ibl = 16, ir = 0 -> warp processes 0...255 in row 4, so offset is 4*4096 = 4*16*256 const int tid = threadIdx.x; const int il = tid/8; // 0...3 const int ib = tid%8; // 0...7 + + const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)vx; + dst_t * y = yy + 256*ii + 32*ib; + const float d = __half2float(x[ibl].d[ir]); int is = 8*ib + ir; float dl1 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32); @@ -812,9 +779,6 @@ static __global__ void dequantize_block_iq4_k_r4(const void * __restrict__ vx, d float dl2 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32); auto values1 = iq4k_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 4); auto values2 = iq4k_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 4); - dst_t * y = yy + 256*ii + 32*ib; - //dst_t * y = yy + (4*row4 + ir)*n_per_row + ibl*QK_K + 32*ib; - //dst_t * y = yy + ir*n_per_row + 4*ibl*QK_K + 32*ib; auto qs = x[ibl].qs + 64*ib + 4*ir; if constexpr (std::is_same_v) { y[il+ 0] = __float2bfloat16(dl1 * values1[qs[il+ 0] & 0xf]); diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 9720fd0fe..75a51bf60 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -6,7 +6,15 @@ #include "iqk_mmvq.cuh" -typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs); +typedef void (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float *); + +template<> +struct ggml_cuda_type_traits { + static constexpr int qk = QK_K; + static constexpr int qr = QR4_XS; + static constexpr int qi = QI4_XS; +}; + // Reminder: // constexpr int qk = ggml_cuda_type_traits::qk; @@ -14,7 +22,7 @@ typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_ // constexpr int vdr = get_vdr_mmvq(type); namespace { -template +template __device__ void iqk_mul_mat_vec_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst, const int64_t row_size) { @@ -24,10 +32,10 @@ __device__ void iqk_mul_mat_vec_q( #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3)) constexpr int nwarps = 1; - constexpr int rows_per_cuda_block = 1; + constexpr int rows_per_cuda_block = n_interleaved; #else constexpr int nwarps = ncols_y <= 4 ? 4 : 2; - constexpr int rows_per_cuda_block = ncols_y == 1 ? 1 : 2; + constexpr int rows_per_cuda_block = n_interleaved == 1 ? ncols_y == 1 ? 
1 : 2 : n_interleaved; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3) const int tid = WARP_SIZE*threadIdx.y + threadIdx.x; @@ -49,10 +57,15 @@ __device__ void iqk_mul_mat_vec_q( #pragma unroll for (int j = 0; j < ncols_y; ++j) { + if constexpr (n_interleaved == 1) { #pragma unroll - for (int i = 0; i < rows_per_cuda_block; ++i) { - tmp[j][i] += vec_dot_q_cuda((const void *)((const char *)vx + (row0 + i)*row_size), - &y[j*blocks_per_col_y + kby], kbx, kqs); + for (int i = 0; i < rows_per_cuda_block; ++i) { + vec_dot_q_cuda((const void *)((const char *)vx + (row0 + i)*row_size), + &y[j*blocks_per_col_y + kby], kbx, kqs, &tmp[j][i]); + } + } else { + vec_dot_q_cuda((const void *)((const char *)vx + row0*row_size), + &y[j*blocks_per_col_y + kby], kbx, kqs, tmp[j]); } } } @@ -90,7 +103,7 @@ __device__ void iqk_mul_mat_vec_q( } } -template +template #if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) // tell the compiler to use as many registers as it wants, see nwarps definition below __launch_bounds__((ncols_y <= 4 ? 4 : 2)*WARP_SIZE, 1) @@ -105,10 +118,10 @@ __global__ void iqk_mul_mat_vec_q( const char * cx = (const char *)vx + i02*nb02; const char * cy = (const char *)vy + i2*nb12; char * cdst = (char *)dst + i2*nb2; - iqk_mul_mat_vec_q(cx, cy, (float *)cdst, ncols_x, nrows_x, nrows_y, nrows_dst, row_size); + iqk_mul_mat_vec_q(cx, cy, (float *)cdst, ncols_x, nrows_x, nrows_y, nrows_dst, row_size); } -template +template void iqk_mul_mat_vec_q_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, @@ -120,26 +133,26 @@ void iqk_mul_mat_vec_q_cuda( int id = ggml_cuda_get_device(); int64_t nwarps = 1; - int64_t rows_per_cuda_block = 1; + int64_t rows_per_cuda_block = n_interleaved; if (ggml_cuda_info().devices[id].cc < CC_RDNA2) { // NVIDIA and AMD older than RDNA2 switch(ncols_y) { case 1: nwarps = 4; - rows_per_cuda_block = 1; + rows_per_cuda_block = n_interleaved == 1 ? 1 : n_interleaved; break; case 2: case 3: case 4: nwarps = 4; - rows_per_cuda_block = 2; + rows_per_cuda_block = n_interleaved == 1 ? 2 : n_interleaved; break; case 5: case 6: case 7: case 8: nwarps = 2; - rows_per_cuda_block = 2; + rows_per_cuda_block = n_interleaved == 1 ? 
2 : n_interleaved; break; default: GGML_ASSERT(false); @@ -154,28 +167,28 @@ void iqk_mul_mat_vec_q_cuda( switch (ncols_y) { case 1: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 2: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 3: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 4: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 5: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 6: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 7: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; case 8: - iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); + iqk_mul_mat_vec_q<<>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0); break; default: GGML_ASSERT(false); @@ -202,8 +215,8 @@ __device__ __forceinline__ void get_int_from_table_16_shift(const uint32_t & q4, #define VDR_IQ4_K_Q8_1_MMVQ 4 #define VDR_IQ4_K_Q8_1_MMQ 4 -__device__ __forceinline__ float vec_dot_iq4_k_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq4_k_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { const block_iq4_k * bq4 = (const block_iq4_k *) vbq + kbx; const uint8_t * all_values = (const uint8_t *)iq4k_values; @@ -226,44 +239,60 @@ __device__ __forceinline__ float vec_dot_iq4_k_q8_1( const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2); const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32; const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32; - return d * (sumi1 * ls1 + sumi2 * ls2); + *result += d * (sumi1 * ls1 + sumi2 * ls2); } -// TODO -__device__ __forceinline__ float vec_dot_iq4_k_r4_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +static __device__ __forceinline__ int2 get_int_from_table_16(const int & q4, const int8_t * values) { + const int q0_32 = (q4 >> 0) & 0x0F0F0F0F; + const int8_t * q0_8 = 
(const int8_t *) &q0_32; + const char4 val0_8 = make_char4(values[q0_8[0]], values[q0_8[1]], values[q0_8[2]], values[q0_8[3]]); - return 0.f; + const int q1_32 = (q4 >> 4) & 0x0F0F0F0F; + const int8_t * q1_8 = (const int8_t *) &q1_32; + const char4 val1_8 = make_char4(values[q1_8[0]], values[q1_8[1]], values[q1_8[2]], values[q1_8[3]]); - const block_iq4_k * bq4 = (const block_iq4_k *) vbq + kbx; - const uint8_t * all_values = (const uint8_t *)iq4k_values; + return make_int2(*((const int *) &val0_8), *((const int *) &val1_8)); +} + +// TODO +__device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { + + const block_iq4_k_r4 * bq4 = (const block_iq4_k_r4 *)vbq + kbx; // iqs is 0...28 const int ib32 = iqs/4; - // Why iqs/4 ? + const float d8 = __low2float(bq8_1[ib32].ds); const int32_t * q8 = (const int *)bq8_1[ib32].qs; - const uint16_t * q4 = (const uint16_t *)bq4->qs + 8*ib32; - const uint16_t extra = bq4->extra >> 2*ib32; - int v1, v2; - int sumi1 = 0, sumi2 = 0; - for (int j = 0; j < 4; ++j) { - const uint32_t aux32 = q4[2*j+0] | (q4[2*j+1] << 16); - get_int_from_table_16_shift(aux32, extra, all_values, v1, v2); - sumi1 = ggml_cuda_dp4a(v1, q8[j+0], sumi1); - sumi2 = ggml_cuda_dp4a(v2, q8[j+4], sumi2); + + int2 val1, val2; + const int * q4 = (const int *)bq4->qs + 16*ib32; + for (int i = 0; i < 4; ++i) { + auto values1 = iq4k_values + (((bq4->extra[i+0] >> ib32) & 1) << 4); + auto values2 = iq4k_values + (((bq4->extra[i+4] >> ib32) & 1) << 4); + int sumi1 = 0, sumi2 = 0; + val1 = get_int_from_table_16(q4[i+ 0], values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); + val2 = get_int_from_table_16(q4[i+ 4], values2); + sumi2 = ggml_cuda_dp4a(val2.x, q8[4], ggml_cuda_dp4a(val2.y, q8[6], sumi2)); + val1 = get_int_from_table_16(q4[i+ 8], values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + val2 = get_int_from_table_16(q4[i+12], values2); + sumi2 = ggml_cuda_dp4a(val2.x, q8[5], ggml_cuda_dp4a(val2.y, q8[7], sumi2)); + int is = 8*ib32 + i; + int ls1 = (((bq4->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bq4->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32; + is += 4; + int ls2 = (((bq4->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bq4->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32; + const float d = __half2float(bq4->d[i]) * d8; + result[i] += d * (sumi1 * ls1 + sumi2 * ls2); } - const float d = __half2float(bq4->d) * __low2float(bq8_1[ib32].ds); - const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2); - const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32; - const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32; - return d * (sumi1 * ls1 + sumi2 * ls2); } #define VDR_IQ4_KS_Q8_1_MMVQ 4 #define VDR_IQ4_KS_Q8_1_MMQ 4 -__device__ __forceinline__ float vec_dot_iq4_ks_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq4_ks_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { float scale = *(const float *)vbq; const block_iq4_ks * bq4 = (const block_iq4_ks *)((const char *)vbq + sizeof(float)) + kbx; @@ -281,14 +310,14 @@ __device__ __forceinline__ float vec_dot_iq4_ks_q8_1( sumi = ggml_cuda_dp4a(v1, q8[j+0], sumi); sumi = ggml_cuda_dp4a(v2, q8[j+4], sumi); } - return dl * __low2float(bq8_1[ib32].ds) 
* sumi; + *result += dl * __low2float(bq8_1[ib32].ds) * sumi; } #define VDR_IQ4_KSS_Q8_1_MMVQ 4 #define VDR_IQ4_KSS_Q8_1_MMQ 4 -__device__ __forceinline__ float vec_dot_iq4_kss_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq4_kss_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { float scale = *(const float *)vbq; const block_iq4_kss * bq4 = (const block_iq4_kss *)((const char *)vbq + sizeof(float)) + kbx; @@ -310,7 +339,7 @@ __device__ __forceinline__ float vec_dot_iq4_kss_q8_1( sumi = ggml_cuda_dp4a(v1, q8[j+0], sumi); sumi = ggml_cuda_dp4a(v2, q8[j+4], sumi); } - return dl * __low2float(bq8_1[ib32].ds) * sumi; + *result += dl * __low2float(bq8_1[ib32].ds) * sumi; } #define VDR_IQ5_K_Q8_1_MMVQ 4 @@ -322,9 +351,8 @@ __device__ __forceinline__ int int_from_table(const uint8_t * a8, const uint8_t return v1 | (v2 << 16); } -__device__ __forceinline__ float vec_dot_iq5_k_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { - +__device__ __forceinline__ void vec_dot_iq5_k_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { const block_iq5_k * bq5 = (const block_iq5_k *) vbq + kbx; const uint8_t * all_values = (const uint8_t *)iq5nl_values; @@ -355,11 +383,11 @@ __device__ __forceinline__ float vec_dot_iq5_k_q8_1( const uint8_t sh = bq5->scales_h[i4/2] >> 2*(i4%2); const int ls1 = (((bq5->scales_l[2*(i4/2)+0] >> 4*(i4%2)) & 0xf) | ((sh << 4) & 0x30)) - 32; const int ls2 = (((bq5->scales_l[2*(i4/2)+1] >> 4*(i4%2)) & 0xf) | ((sh << 0) & 0x30)) - 32; - return d5 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2); + *result += d5 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2); } -__device__ __forceinline__ float vec_dot_iq5_ks_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq5_ks_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { float scale = *(const float *)vbq; const block_iq5_ks * bq5 = (const block_iq5_ks *)((const char *)vbq + sizeof(float)) + kbx; @@ -388,15 +416,14 @@ __device__ __forceinline__ float vec_dot_iq5_ks_q8_1( } const int ls1 = (bq5->scales[2*(i4/2)+0] & 254) - 127; const int ls2 = (bq5->scales[2*(i4/2)+1] & 254) - 127; - return scale * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2); + *result += scale * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2); } #define VDR_IQ6_K_Q8_1_MMVQ 4 #define VDR_IQ6_K_Q8_1_MMQ 4 -__device__ __forceinline__ float vec_dot_iq6_k_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { - +__device__ __forceinline__ void vec_dot_iq6_k_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { const block_iq6_k * bq6 = (const block_iq6_k *) vbq + kbx; const uint8_t * all_values = (const uint8_t *)iq6nl_values; @@ -425,7 +452,7 @@ __device__ __forceinline__ float vec_dot_iq6_k_q8_1( sumi2 = ggml_cuda_dp4a(v2, 
q8_2[j], sumi2); } const float d6 = __half2float(bq6->d); - return d6 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * bq6->scales[4*(i4/2)+(i4%2)] + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * bq6->scales[4*(i4/2)+(i4%2)+2]); + *result += d6 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * bq6->scales[4*(i4/2)+(i4%2)] + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * bq6->scales[4*(i4/2)+(i4%2)+2]); } static const __device__ uint32_t iq2k_table[512] = { @@ -502,8 +529,8 @@ __device__ __forceinline__ int int_from_table_4(const uint8_t * a8, const int * #define VDR_IQ2_K_Q8_1_MMVQ 4 #define VDR_IQ2_K_Q8_1_MMQ 4 -__device__ __forceinline__ float vec_dot_iq2_k_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq2_k_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { // iqs is 0, 4, 8, 12, 16, 20, 24, 28 // we have 16 packed quants (when cast to int) @@ -554,18 +581,17 @@ __device__ __forceinline__ float vec_dot_iq2_k_q8_1( v2 = int_from_table_4(a8 + 4, values); int sumi4 = ggml_cuda_dp4a(v2, q8_4[1], ggml_cuda_dp4a(v1, q8_4[0], 0)) * s8[3]; - return __half2float(bq2->d) * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1 - + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2 - + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3 - + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4); - + *result += __half2float(bq2->d) * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1 + + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2 + + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3 + + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4); } #define VDR_IQ2_KS_Q8_1_MMVQ 4 #define VDR_IQ2_KS_Q8_1_MMQ 4 -__device__ __forceinline__ float vec_dot_iq2_ks_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq2_ks_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { float scale = *(const half *)vbq; const block_iq2_ks * bq2 = (const block_iq2_ks *)((const char *)vbq + sizeof(half)) + kbx; @@ -614,10 +640,10 @@ __device__ __forceinline__ float vec_dot_iq2_ks_q8_1( v2 = int_from_table_4(a8 + 4, values); int sumi4 = ggml_cuda_dp4a(v2, q8_4[1], ggml_cuda_dp4a(v1, q8_4[0], 0)) * s8[3]; - return scale * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1 - + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2 - + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3 - + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4); + *result += scale * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1 + + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2 + + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3 + + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4); } #define VDR_IQ3_K_Q8_1_MMVQ 4 @@ -638,8 +664,8 @@ __device__ __forceinline__ int int_from_table_2(const uint8_t * a8, const uint16 return values[a8[0] | (a8[1] << 3)] | (values[a8[2] | (a8[3] << 3)] << 16); } -__device__ __forceinline__ float vec_dot_iq3_k_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs) { +__device__ __forceinline__ void vec_dot_iq3_k_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) { const block_iq3_k * bq3 = (const block_iq3_k *) vbq + kbx; int iqs = iiqs/4; @@ -697,15 +723,15 @@ __device__ __forceinline__ float vec_dot_iq3_k_q8_1( const float d = __half2float(bq3->d); const uint16_t * 
sl16 = (const uint16_t *)bq3->scales_l + 2*ib128; aux32 = ((((sl16[0] | (sl16[1] << 16)) >> shift) & 0x0f0f0f0f) << 1) | 0x01010101; - return d * (__low2float(bq8_1[4*ib128+0].ds) * aux8[0] * (sh & 0x01 ? -1 : 1) * sumi[0] + - __low2float(bq8_1[4*ib128+1].ds) * aux8[1] * (sh & 0x04 ? -1 : 1) * sumi[1] + - __low2float(bq8_1[4*ib128+2].ds) * aux8[2] * (sh & 0x10 ? -1 : 1) * sumi[2] + - __low2float(bq8_1[4*ib128+3].ds) * aux8[3] * (sh & 0x40 ? -1 : 1) * sumi[3]); + *result += d * (__low2float(bq8_1[4*ib128+0].ds) * aux8[0] * (sh & 0x01 ? -1 : 1) * sumi[0] + + __low2float(bq8_1[4*ib128+1].ds) * aux8[1] * (sh & 0x04 ? -1 : 1) * sumi[1] + + __low2float(bq8_1[4*ib128+2].ds) * aux8[2] * (sh & 0x10 ? -1 : 1) * sumi[2] + + __low2float(bq8_1[4*ib128+3].ds) * aux8[3] * (sh & 0x40 ? -1 : 1) * sumi[3]); } -__device__ __forceinline__ float vec_dot_iq1_bn_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq1_bn_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { half d16; memcpy(&d16, vbq, sizeof(d16)); float scale = d16; @@ -739,7 +765,7 @@ __device__ __forceinline__ float vec_dot_iq1_bn_q8_1( sumi = __dp4a(val[0], q8[4*l+0], __dp4a(val[1], q8[4*l+1], __dp4a(val[2], q8[4*l+2], __dp4a(val[3], q8[4*l+3], sumi)))); } float2 d8 = __half22float2(bq8_1[iqs].ds); - return scale * (d8.x * sumi - d8.y); + *result += scale * (d8.x * sumi - d8.y); #else static const uint16_t k_mult[5] = {81, 27, 9, 3, 1}; const int8_t * q8 = bq8_1[iqs].qs; @@ -759,12 +785,12 @@ __device__ __forceinline__ float vec_dot_iq1_bn_q8_1( sumi += q8[0]*(vs - 1); q8++; } - return scale * __low2float(bq8_1[iqs].ds) * sumi; + *result += scale * __low2float(bq8_1[iqs].ds) * sumi; #endif } -__device__ __forceinline__ float vec_dot_iq2_bn_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) { +__device__ __forceinline__ void vec_dot_iq2_bn_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { float scale = *(const float *)vbq; const block_iq2_bn * bq2 = (const block_iq2_bn *)((const char *)vbq + sizeof(float)) + kbx; @@ -786,7 +812,7 @@ __device__ __forceinline__ float vec_dot_iq2_bn_q8_1( } auto d8l = __half22float2(bq8_1[0].ds); auto d8h = __half22float2(bq8_1[1].ds); - return scale * (d8l.x * (sumi1 + 0.25f*sumi2) + d8h.x * (sumi3 + 0.25f * sumi4) - 0.5f*d8l.y - 0.5f*d8h.y); + *result += scale * (d8l.x * (sumi1 + 0.25f*sumi2) + d8h.x * (sumi3 + 0.25f * sumi4) - 0.5f*d8l.y - 0.5f*d8h.y); #else int sumi1 = 0, sumi2 = 0, sumi3 = 0, sumi4 = 0; auto q8l = bq8_1[0].qs + 8*iqs; @@ -800,7 +826,7 @@ __device__ __forceinline__ float vec_dot_iq2_bn_q8_1( } auto d8l = __half22float2(bq8_1[0].ds); auto d8h = __half22float2(bq8_1[1].ds); - return scale * (d8l.x * (sumi1 + 0.25f*sumi2) + 0.0625f * d8h.x*(sumi3 + 0.25f*sumi4) - 0.5f*d8l.y - 0.5f*d8h.y); + *result += scale * (d8l.x * (sumi1 + 0.25f*sumi2) + 0.0625f * d8h.x*(sumi3 + 0.25f*sumi4) - 0.5f*d8l.y - 0.5f*d8h.y); #endif } @@ -835,7 +861,7 @@ void mul_mat_vec_iq4_k_r4_q8_1_cuda( const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) { - iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, 
nb02, nb12, nb2, ids_nb0, stream); + iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); } void mul_mat_vec_iq4_ks_q8_1_cuda( diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index 89b74f4bf..19eb1aabf 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -542,6 +542,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm case GGML_TYPE_IQ3_S: mul_mat_vec_iq3_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); break; + case GGML_TYPE_IQ4_K_R4: + mul_mat_vec_iq4_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); + break; default: GGML_ABORT("fatal error"); break; @@ -655,6 +658,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) { case GGML_TYPE_IQ5_KS: case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_K_R4: return true; default: return false; From c6b711c8b0696c916975b0fdca7889c564e399f6 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 10:52:47 +0300 Subject: [PATCH 03/11] CUDA: slightly faster iq4_k_r4 GEMV --- ggml/src/ggml-cuda/iqk_mmvq.cu | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 75a51bf60..842174b30 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -34,7 +34,7 @@ __device__ void iqk_mul_mat_vec_q( constexpr int nwarps = 1; constexpr int rows_per_cuda_block = n_interleaved; #else - constexpr int nwarps = ncols_y <= 4 ? 4 : 2; + constexpr int nwarps = n_interleaved == 1 ? ncols_y <= 4 ? 4 : 2 : 1; constexpr int rows_per_cuda_block = n_interleaved == 1 ? ncols_y == 1 ? 1 : 2 : n_interleaved; #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3) @@ -138,20 +138,20 @@ void iqk_mul_mat_vec_q_cuda( if (ggml_cuda_info().devices[id].cc < CC_RDNA2) { // NVIDIA and AMD older than RDNA2 switch(ncols_y) { case 1: - nwarps = 4; + nwarps = n_interleaved == 1 ? 4 : 1; rows_per_cuda_block = n_interleaved == 1 ? 1 : n_interleaved; break; case 2: case 3: case 4: - nwarps = 4; + nwarps = n_interleaved == 1 ? 4 : 1; rows_per_cuda_block = n_interleaved == 1 ? 2 : n_interleaved; break; case 5: case 6: case 7: case 8: - nwarps = 2; + nwarps = n_interleaved == 1 ? 2 : 1; rows_per_cuda_block = n_interleaved == 1 ? 
2 : n_interleaved; break; default: @@ -265,6 +265,12 @@ __device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1( const float d8 = __low2float(bq8_1[ib32].ds); const int32_t * q8 = (const int *)bq8_1[ib32].qs; + int scales[2]; + const uint32_t * scales_l = (const uint32_t *)bq4->scales_l; + const uint32_t * scales_h = (const uint32_t *)bq4->scales_h; + scales[0] = __vsub4(((scales_l[2*(ib32%4)+0] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+0] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + scales[1] = __vsub4(((scales_l[2*(ib32%4)+1] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+1] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + const int8_t * s8 = (const int8_t *)scales; int2 val1, val2; const int * q4 = (const int *)bq4->qs + 16*ib32; for (int i = 0; i < 4; ++i) { @@ -279,12 +285,8 @@ __device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1( sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); val2 = get_int_from_table_16(q4[i+12], values2); sumi2 = ggml_cuda_dp4a(val2.x, q8[5], ggml_cuda_dp4a(val2.y, q8[7], sumi2)); - int is = 8*ib32 + i; - int ls1 = (((bq4->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bq4->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32; - is += 4; - int ls2 = (((bq4->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bq4->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32; const float d = __half2float(bq4->d[i]) * d8; - result[i] += d * (sumi1 * ls1 + sumi2 * ls2); + result[i] += d * (sumi1 * s8[i] + sumi2 * s8[i+4]); } } From 5ac189d46548ba05ad1b74ec89d362a3f74ca3f4 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 11:31:54 +0300 Subject: [PATCH 04/11] CUDA: slightly faster iq4_k_r4 GEMV We are now within 3% of iq4_k --- ggml/src/ggml-cuda/iqk_mmvq.cu | 38 ++++++++++++++++------------------ 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 842174b30..88eb798ac 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -21,6 +21,9 @@ struct ggml_cuda_type_traits { // constexpr int qi = ggml_cuda_type_traits::qi; // constexpr int vdr = get_vdr_mmvq(type); +// QI4_XS = 256/(4*2) = 32 +// vdr = 4, qi = 32 -> qi/vdr = 8, kqs = 4*(tid%8), blocks_per_iter = 4*1*32/32 = 4 +// vdr = 2, qi = 32 -> qi/vdr =16, kqs = 2*(tid%16), blocks_per_iter = 2*1*32/32 = 2 namespace { template __device__ void iqk_mul_mat_vec_q( @@ -254,39 +257,34 @@ static __device__ __forceinline__ int2 get_int_from_table_16(const int & q4, con return make_int2(*((const int *) &val0_8), *((const int *) &val1_8)); } -// TODO __device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { const block_iq4_k_r4 * bq4 = (const block_iq4_k_r4 *)vbq + kbx; - // iqs is 0...28 - const int ib32 = iqs/4; - const float d8 = __low2float(bq8_1[ib32].ds); - const int32_t * q8 = (const int *)bq8_1[ib32].qs; + // iqs is 0...28 in steps of 2 + const int ib16 = iqs/2; + const float d8 = __low2float(bq8_1[ib16/2].ds); + const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); - int scales[2]; + int ib32 = ib16/2; + int is = ib16%2; + int scales; const uint32_t * scales_l = (const uint32_t *)bq4->scales_l; const uint32_t * scales_h = (const uint32_t *)bq4->scales_h; - scales[0] = __vsub4(((scales_l[2*(ib32%4)+0] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+0] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); - scales[1] = 
__vsub4(((scales_l[2*(ib32%4)+1] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+1] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); - const int8_t * s8 = (const int8_t *)scales; + scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + const int8_t * s8 = (const int8_t *)&scales; int2 val1, val2; const int * q4 = (const int *)bq4->qs + 16*ib32; for (int i = 0; i < 4; ++i) { - auto values1 = iq4k_values + (((bq4->extra[i+0] >> ib32) & 1) << 4); - auto values2 = iq4k_values + (((bq4->extra[i+4] >> ib32) & 1) << 4); - int sumi1 = 0, sumi2 = 0; - val1 = get_int_from_table_16(q4[i+ 0], values1); + auto values1 = iq4k_values + (((bq4->extra[i+4*is] >> ib32) & 1) << 4); + int sumi1 = 0; + val1 = get_int_from_table_16(q4[i+4*is+0], values1); sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); - val2 = get_int_from_table_16(q4[i+ 4], values2); - sumi2 = ggml_cuda_dp4a(val2.x, q8[4], ggml_cuda_dp4a(val2.y, q8[6], sumi2)); - val1 = get_int_from_table_16(q4[i+ 8], values1); + val1 = get_int_from_table_16(q4[i+4*is+8], values1); sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); - val2 = get_int_from_table_16(q4[i+12], values2); - sumi2 = ggml_cuda_dp4a(val2.x, q8[5], ggml_cuda_dp4a(val2.y, q8[7], sumi2)); const float d = __half2float(bq4->d[i]) * d8; - result[i] += d * (sumi1 * s8[i] + sumi2 * s8[i+4]); + result[i] += d * sumi1 * s8[i]; } } @@ -863,7 +861,7 @@ void mul_mat_vec_iq4_k_r4_q8_1_cuda( const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) { - iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); + iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); } void mul_mat_vec_iq4_ks_q8_1_cuda( From 15adb7e0a78eace8d2128b720628cf1b4267c191 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 12:42:47 +0300 Subject: [PATCH 05/11] CUDA: iq5_k_r4 dequantize --- ggml/src/ggml-cuda.cu | 1 + ggml/src/ggml-cuda/convert.cu | 62 +++++++++++++++++++++++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cu | 48 +++++++++++++++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cuh | 5 +++ ggml/src/ggml-cuda/mmvq.cu | 4 +++ 5 files changed, 120 insertions(+) diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu index b5cc39538..92590f98a 100644 --- a/ggml/src/ggml-cuda.cu +++ b/ggml/src/ggml-cuda.cu @@ -3471,6 +3471,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_TYPE_IQ1_BN: case GGML_TYPE_IQ2_BN: case GGML_TYPE_IQ4_K_R4: + case GGML_TYPE_IQ5_K_R4: return true; default: return false; diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index a200a2128..8ef7b5ad9 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ b/ggml/src/ggml-cuda/convert.cu @@ -838,6 +838,54 @@ static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_ } } +template +static __global__ void dequantize_block_iq5_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { + + int64_t ii = blockIdx.x; + + int64_t nblock = n_per_row/256; + int64_t row = ii/nblock; + int64_t row4 = row/4; + int64_t ir = row%4; + int64_t ibl = row4*nblock + ii%nblock; + + const int 
tid = threadIdx.x; + const int il = tid/8; // 0...3 + const int ib = tid%8; // 0...7 + + const block_iq5_k_r4 * x = (const block_iq5_k_r4 *)vx; + dst_t * y = yy + 256*ii + 32*ib; + + const float d = __half2float(x[ibl].d[ir]); + int is = 8*ib + ir; + float dl1 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32); + is += 4; + float dl2 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32); + auto values1 = iq5nl_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 5); + auto values2 = iq5nl_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 5); + auto qs = x[ibl].qs + 64*ib + 4*ir; + auto qh = x[ibl].qh + 16*ib + 4*ir; + if constexpr (std::is_same_v) { + y[il+ 0] = __float2bfloat16(dl1 * values1[(qs[il+ 0] & 0xf) | (((qh[il] >> 0) & 1) << 4)]); + y[il+ 4] = __float2bfloat16(dl1 * values1[(qs[il+32] & 0xf) | (((qh[il] >> 4) & 1) << 4)]); + y[il+ 8] = __float2bfloat16(dl1 * values1[(qs[il+ 0] >> 4) | (((qh[il] >> 1) & 1) << 4)]); + y[il+12] = __float2bfloat16(dl1 * values1[(qs[il+32] >> 4) | (((qh[il] >> 5) & 1) << 4)]); + y[il+16] = __float2bfloat16(dl2 * values2[(qs[il+16] & 0xf) | (((qh[il] >> 2) & 1) << 4)]); + y[il+20] = __float2bfloat16(dl2 * values2[(qs[il+48] & 0xf) | (((qh[il] >> 6) & 1) << 4)]); + y[il+24] = __float2bfloat16(dl2 * values2[(qs[il+16] >> 4) | (((qh[il] >> 3) & 1) << 4)]); + y[il+28] = __float2bfloat16(dl2 * values2[(qs[il+48] >> 4) | (((qh[il] >> 7) & 1) << 4)]); + } else { + y[il+ 0] = dl1 * values1[(qs[il+ 0] & 0xf) | (((qh[il] >> 0) & 1) << 4)]; + y[il+ 4] = dl1 * values1[(qs[il+32] & 0xf) | (((qh[il] >> 4) & 1) << 4)]; + y[il+ 8] = dl1 * values1[(qs[il+ 0] >> 4) | (((qh[il] >> 1) & 1) << 4)]; + y[il+12] = dl1 * values1[(qs[il+32] >> 4) | (((qh[il] >> 5) & 1) << 4)]; + y[il+16] = dl2 * values2[(qs[il+16] & 0xf) | (((qh[il] >> 2) & 1) << 4)]; + y[il+20] = dl2 * values2[(qs[il+48] & 0xf) | (((qh[il] >> 6) & 1) << 4)]; + y[il+24] = dl2 * values2[(qs[il+16] >> 4) | (((qh[il] >> 3) & 1) << 4)]; + y[il+28] = dl2 * values2[(qs[il+48] >> 4) | (((qh[il] >> 7) & 1) << 4)]; + } +} + template static __global__ void dequantize_block_iq5_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { @@ -1271,6 +1319,14 @@ static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t dequantize_block_iq5_k<<>>(vx, y); } +template +static void dequantize_row_iq5_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { + const int64_t k = nrows * n_per_row; + const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_K, n_per_row); + const int nb = (k + QK_K - 1) / QK_K; + dequantize_block_iq5_k_r4<<>>(vx, y, n_per_row, row_size); +} + template static void dequantize_row_iq6_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { const int64_t k = nrows * n_per_row; @@ -1369,6 +1425,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) { return dequantize_row_iq6_k_cuda; case GGML_TYPE_IQ4_K_R4: return dequantize_row_iq4_k_r4_cuda; + case GGML_TYPE_IQ5_K_R4: + return dequantize_row_iq5_k_r4_cuda; default: return nullptr; } @@ -1453,6 +1511,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { return convert_from_bf16_cuda; case GGML_TYPE_IQ4_K_R4: return dequantize_row_iq4_k_r4_cuda; + case GGML_TYPE_IQ5_K_R4: + return dequantize_row_iq5_k_r4_cuda; default: return nullptr; } @@ -1534,6 +1594,8 @@ to_fp32_cuda_t 
ggml_get_to_fp32_cuda(ggml_type type) { return convert_from_bf16_cuda; case GGML_TYPE_IQ4_K_R4: return dequantize_row_iq4_k_r4_cuda; + case GGML_TYPE_IQ5_K_R4: + return dequantize_row_iq5_k_r4_cuda; default: return nullptr; } diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 88eb798ac..1e1077a3e 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -15,6 +15,13 @@ struct ggml_cuda_type_traits { static constexpr int qi = QI4_XS; }; +template<> +struct ggml_cuda_type_traits { + static constexpr int qk = QK_K; + static constexpr int qr = QR5_XS; + static constexpr int qi = QI5_XS; +}; + // Reminder: // constexpr int qk = ggml_cuda_type_traits::qk; @@ -288,6 +295,39 @@ __device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1( } } +__device__ __forceinline__ void vec_dot_iq5_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { + + return; + + const block_iq4_k_r4 * bq4 = (const block_iq4_k_r4 *)vbq + kbx; + + // iqs is 0...28 in steps of 2 + const int ib16 = iqs/2; + const float d8 = __low2float(bq8_1[ib16/2].ds); + const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); + + int ib32 = ib16/2; + int is = ib16%2; + int scales; + const uint32_t * scales_l = (const uint32_t *)bq4->scales_l; + const uint32_t * scales_h = (const uint32_t *)bq4->scales_h; + scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + const int8_t * s8 = (const int8_t *)&scales; + int2 val1, val2; + const int * q4 = (const int *)bq4->qs + 16*ib32; + for (int i = 0; i < 4; ++i) { + auto values1 = iq4k_values + (((bq4->extra[i+4*is] >> ib32) & 1) << 4); + int sumi1 = 0; + val1 = get_int_from_table_16(q4[i+4*is+0], values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); + val1 = get_int_from_table_16(q4[i+4*is+8], values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + const float d = __half2float(bq4->d[i]) * d8; + result[i] += d * sumi1 * s8[i]; + } +} + #define VDR_IQ4_KS_Q8_1_MMVQ 4 #define VDR_IQ4_KS_Q8_1_MMQ 4 @@ -864,6 +904,14 @@ void mul_mat_vec_iq4_k_r4_q8_1_cuda( iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); } +void mul_mat_vec_iq5_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) { + + iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); +} + void mul_mat_vec_iq4_ks_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh index 8de8e3fed..929debab3 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cuh +++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh @@ -65,3 +65,8 @@ void mul_mat_vec_iq4_k_r4_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, const int ne2, const uint64_t nb02, const uint64_t 
nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); + +void mul_mat_vec_iq5_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index 19eb1aabf..670156cba 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -545,6 +545,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm case GGML_TYPE_IQ4_K_R4: mul_mat_vec_iq4_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); break; + case GGML_TYPE_IQ5_K_R4: + mul_mat_vec_iq5_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); + break; default: GGML_ABORT("fatal error"); break; @@ -659,6 +662,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) { case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_K_R4: + case GGML_TYPE_IQ5_K_R4: return true; default: return false; From 4af604288dbe9d98dc2fd5cdc58232b0753e12d0 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 13:10:49 +0300 Subject: [PATCH 06/11] CUDA: iq5_k_r4 GEMV ~3% slower than iq5_k. --- ggml/src/ggml-cuda/iqk_mmvq.cu | 75 +++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 34 deletions(-) diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 1e1077a3e..e7a821a92 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -281,40 +281,7 @@ __device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1( const uint32_t * scales_h = (const uint32_t *)bq4->scales_h; scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); const int8_t * s8 = (const int8_t *)&scales; - int2 val1, val2; - const int * q4 = (const int *)bq4->qs + 16*ib32; - for (int i = 0; i < 4; ++i) { - auto values1 = iq4k_values + (((bq4->extra[i+4*is] >> ib32) & 1) << 4); - int sumi1 = 0; - val1 = get_int_from_table_16(q4[i+4*is+0], values1); - sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); - val1 = get_int_from_table_16(q4[i+4*is+8], values1); - sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); - const float d = __half2float(bq4->d[i]) * d8; - result[i] += d * sumi1 * s8[i]; - } -} - -__device__ __forceinline__ void vec_dot_iq5_k_r4_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { - - return; - - const block_iq4_k_r4 * bq4 = (const block_iq4_k_r4 *)vbq + kbx; - - // iqs is 0...28 in steps of 2 - const int ib16 = iqs/2; - const float d8 = __low2float(bq8_1[ib16/2].ds); - const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); - - int ib32 = ib16/2; - int is = ib16%2; - int scales; - const uint32_t * scales_l = (const uint32_t *)bq4->scales_l; - const uint32_t * scales_h = (const uint32_t *)bq4->scales_h; - scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); - const int8_t * s8 = (const int8_t 
*)&scales; - int2 val1, val2; + int2 val1; const int * q4 = (const int *)bq4->qs + 16*ib32; for (int i = 0; i < 4; ++i) { auto values1 = iq4k_values + (((bq4->extra[i+4*is] >> ib32) & 1) << 4); @@ -426,6 +393,46 @@ __device__ __forceinline__ void vec_dot_iq5_k_q8_1( *result += d5 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2); } +__device__ __forceinline__ void vec_dot_iq5_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { + + const block_iq5_k_r4 * bq5 = (const block_iq5_k_r4 *)vbq + kbx; + + // iqs is 0...28 in steps of 2 + const int ib16 = iqs/2; + const float d8 = __low2float(bq8_1[ib16/2].ds); + const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); + + int ib32 = ib16/2; + int is = ib16%2; + int scales; + const uint32_t * scales_l = (const uint32_t *)bq5->scales_l; + const uint32_t * scales_h = (const uint32_t *)bq5->scales_h; + scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + const int8_t * s8 = (const int8_t *)&scales; + int2 val1; + const int * q4 = (const int *)bq5->qs + 16*ib32; + const int * qh = (const int *)bq5->qh + 4*ib32; + int aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + for (int i = 0; i < 4; ++i) { + auto values1 = iq5nl_values + (((bq5->extra[i+4*is] >> ib32) & 1) << 5); + int sumi1 = 0; + aux32[0] = ((q4[i+4*is+0] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+0)) & 0x01010101) << 4); + aux32[1] = ((q4[i+4*is+0] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+1)) & 0x01010101) << 4); + val1.x = int_from_table(aux8+0, (const uint8_t *)values1); + val1.y = int_from_table(aux8+4, (const uint8_t *)values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); + aux32[0] = ((q4[i+4*is+8] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+4)) & 0x01010101) << 4); + aux32[1] = ((q4[i+4*is+8] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+5)) & 0x01010101) << 4); + val1.x = int_from_table(aux8+0, (const uint8_t *)values1); + val1.y = int_from_table(aux8+4, (const uint8_t *)values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + const float d = __half2float(bq5->d[i]) * d8; + result[i] += d * sumi1 * s8[i]; + } +} + __device__ __forceinline__ void vec_dot_iq5_ks_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { From 395dc935fca59c9a26722361db8c38844182b9a3 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 14:12:03 +0300 Subject: [PATCH 07/11] CUDA: iq3_k_r4 dequantize --- ggml/src/ggml-cuda.cu | 1 + ggml/src/ggml-cuda/convert.cu | 62 +++++++++++++++++++++++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cu | 57 ++++++++++++++++++++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cuh | 5 +++ ggml/src/ggml-cuda/mmvq.cu | 4 +++ 5 files changed, 129 insertions(+) diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu index 92590f98a..e06a78265 100644 --- a/ggml/src/ggml-cuda.cu +++ b/ggml/src/ggml-cuda.cu @@ -3470,6 +3470,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ1_BN: case GGML_TYPE_IQ2_BN: + case GGML_TYPE_IQ3_K_R4: case GGML_TYPE_IQ4_K_R4: case GGML_TYPE_IQ5_K_R4: return true; diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index 8ef7b5ad9..a94c00e9b 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ 
b/ggml/src/ggml-cuda/convert.cu @@ -886,6 +886,54 @@ static __global__ void dequantize_block_iq5_k_r4(const void * __restrict__ vx, d } } +template<typename dst_t> +static __global__ void dequantize_block_iq3_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { + + int64_t ii = blockIdx.x; + + int64_t nblock = n_per_row/256; + int64_t row = ii/nblock; + int64_t row4 = row/4; + int64_t ir = row%4; + int64_t ibl = row4*nblock + ii%nblock; + + const int tid = threadIdx.x; + const int il = tid/8; // 0...3 + const int ib = tid%8; // 0...7 + + const block_iq3_k_r4 * x = (const block_iq3_k_r4 *)vx; + dst_t * y = yy + 256*ii + 32*ib; + + const float d = __half2float(x[ibl].d[ir]); + int is = 8*ib + ir; + float dl1 = d * (2*((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) + 1) * ((x[ibl].scales_h[is%8] >> (is/8)) & 1 ? -1 : 1); + is += 4; + float dl2 = d * (2*((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) + 1) * ((x[ibl].scales_h[is%8] >> (is/8)) & 1 ? -1 : 1); + auto values1 = iq3nl_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 3); + auto values2 = iq3nl_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 3); + auto ql = x[ibl].qs + 32*ib + 4*ir; + auto qh = x[ibl].qh + 16*ib + 4*ir; + if constexpr (std::is_same_v<dst_t, nv_bfloat16>) { + y[il+ 0] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 0) & 3) | ((qh[il] << 2) & 4)]); + y[il+ 4] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 2) & 3) | ((qh[il] << 1) & 4)]); + y[il+ 8] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 4) & 3) | ((qh[il] << 0) & 4)]); + y[il+12] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 6) & 3) | ((qh[il] >> 1) & 4)]); + y[il+16] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 0) & 3) | ((qh[il] >> 2) & 4)]); + y[il+20] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 2) & 3) | ((qh[il] >> 3) & 4)]); + y[il+24] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 4) & 3) | ((qh[il] >> 4) & 4)]); + y[il+28] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 6) & 3) | ((qh[il] >> 5) & 4)]); + } else { + y[il+ 0] = dl1 * values1[((ql[il+ 0] >> 0) & 3) | ((qh[il] << 2) & 4)]; + y[il+ 4] = dl1 * values1[((ql[il+ 0] >> 2) & 3) | ((qh[il] << 1) & 4)]; + y[il+ 8] = dl1 * values1[((ql[il+ 0] >> 4) & 3) | ((qh[il] << 0) & 4)]; + y[il+12] = dl1 * values1[((ql[il+ 0] >> 6) & 3) | ((qh[il] >> 1) & 4)]; + y[il+16] = dl2 * values2[((ql[il+16] >> 0) & 3) | ((qh[il] >> 2) & 4)]; + y[il+20] = dl2 * values2[((ql[il+16] >> 2) & 3) | ((qh[il] >> 3) & 4)]; + y[il+24] = dl2 * values2[((ql[il+16] >> 4) & 3) | ((qh[il] >> 4) & 4)]; + y[il+28] = dl2 * values2[((ql[il+16] >> 6) & 3) | ((qh[il] >> 5) & 4)]; + } +} + template<typename dst_t> static __global__ void dequantize_block_iq5_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { @@ -1297,6 +1345,14 @@ static void dequantize_row_iq3_k_cuda(const void * vx, dst_t * y, const int64_t dequantize_block_iq3_k<<<nb, 32, 0, stream>>>(vx, y); } +template<typename dst_t> +static void dequantize_row_iq3_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { + const int64_t k = nrows * n_per_row; + const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_K, n_per_row); + const int nb = (k + QK_K - 1) / QK_K; + dequantize_block_iq3_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size); +} + template<typename dst_t> static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { const int64_t k = nrows * n_per_row; @@ -1423,6 +1479,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) { return dequantize_row_iq5_k_cuda; case 
GGML_TYPE_IQ6_K: return dequantize_row_iq6_k_cuda; + case GGML_TYPE_IQ3_K_R4: + return dequantize_row_iq3_k_r4_cuda; case GGML_TYPE_IQ4_K_R4: return dequantize_row_iq4_k_r4_cuda; case GGML_TYPE_IQ5_K_R4: @@ -1509,6 +1567,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { return convert_unary_cuda; case GGML_TYPE_BF16: return convert_from_bf16_cuda; + case GGML_TYPE_IQ3_K_R4: + return dequantize_row_iq3_k_r4_cuda; case GGML_TYPE_IQ4_K_R4: return dequantize_row_iq4_k_r4_cuda; case GGML_TYPE_IQ5_K_R4: @@ -1592,6 +1652,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { return convert_unary_cuda; case GGML_TYPE_BF16: return convert_from_bf16_cuda; + case GGML_TYPE_IQ3_K_R4: + return dequantize_row_iq3_k_r4_cuda; case GGML_TYPE_IQ4_K_R4: return dequantize_row_iq4_k_r4_cuda; case GGML_TYPE_IQ5_K_R4: diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index e7a821a92..136c8ebdb 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -8,6 +8,13 @@ typedef void (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float *); +template<> +struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K_R4> { + static constexpr int qk = QK_K; + static constexpr int qr = QR4_XS; + static constexpr int qi = QI4_XS; +}; + template<> struct ggml_cuda_type_traits { static constexpr int qk = QK_K; @@ -466,6 +473,48 @@ __device__ __forceinline__ void vec_dot_iq5_ks_q8_1( *result += scale * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2); } +__device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { + + return; + + const block_iq5_k_r4 * bq5 = (const block_iq5_k_r4 *)vbq + kbx; + + // iqs is 0...28 in steps of 2 + const int ib16 = iqs/2; + const float d8 = __low2float(bq8_1[ib16/2].ds); + const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); + + int ib32 = ib16/2; + int is = ib16%2; + int scales; + const uint32_t * scales_l = (const uint32_t *)bq5->scales_l; + const uint32_t * scales_h = (const uint32_t *)bq5->scales_h; + scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + const int8_t * s8 = (const int8_t *)&scales; + int2 val1; + const int * q4 = (const int *)bq5->qs + 16*ib32; + const int * qh = (const int *)bq5->qh + 4*ib32; + int aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + for (int i = 0; i < 4; ++i) { + auto values1 = iq5nl_values + (((bq5->extra[i+4*is] >> ib32) & 1) << 5); + int sumi1 = 0; + aux32[0] = ((q4[i+4*is+0] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+0)) & 0x01010101) << 4); + aux32[1] = ((q4[i+4*is+0] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+1)) & 0x01010101) << 4); + val1.x = int_from_table(aux8+0, (const uint8_t *)values1); + val1.y = int_from_table(aux8+4, (const uint8_t *)values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); + aux32[0] = ((q4[i+4*is+8] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+4)) & 0x01010101) << 4); + aux32[1] = ((q4[i+4*is+8] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+5)) & 0x01010101) << 4); + val1.x = int_from_table(aux8+0, (const uint8_t *)values1); + val1.y = int_from_table(aux8+4, (const uint8_t *)values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + const float d = __half2float(bq5->d[i]) * d8; + result[i] 
+= d * sumi1 * s8[i]; + } +} + #define VDR_IQ6_K_Q8_1_MMVQ 4 #define VDR_IQ6_K_Q8_1_MMQ 4 @@ -919,6 +968,14 @@ void mul_mat_vec_iq5_k_r4_q8_1_cuda( iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); } +void mul_mat_vec_iq3_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) { + + iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); +} + void mul_mat_vec_iq4_ks_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh index 929debab3..73c1a9460 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cuh +++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh @@ -61,6 +61,11 @@ void mul_mat_vec_iq2_bn_q8_1_cuda( const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); +void mul_mat_vec_iq3_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); + void mul_mat_vec_iq4_k_r4_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index 670156cba..e1cc9bc02 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -542,6 +542,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm case GGML_TYPE_IQ3_S: mul_mat_vec_iq3_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); break; + case GGML_TYPE_IQ3_K_R4: + mul_mat_vec_iq3_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); + break; case GGML_TYPE_IQ4_K_R4: mul_mat_vec_iq4_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); break; @@ -661,6 +664,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) { case GGML_TYPE_IQ5_KS: case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ3_K_R4: case GGML_TYPE_IQ4_K_R4: case GGML_TYPE_IQ5_K_R4: return true; From 1faa7d977b4fb3fa64320a5259dceb9522203b61 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 14:49:58 +0300 Subject: [PATCH 08/11] CUDA: iq3_k_r4 GEMV --- ggml/src/ggml-cuda/iqk_mmvq.cu | 37 +++++++++++++++++----------------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 136c8ebdb..8093428e5 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ 
b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -476,42 +476,43 @@ __device__ __forceinline__ void vec_dot_iq5_ks_q8_1( __device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1( const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { - return; - - const block_iq5_k_r4 * bq5 = (const block_iq5_k_r4 *)vbq + kbx; + const block_iq3_k_r4 * bq3 = (const block_iq3_k_r4 *)vbq + kbx; // iqs is 0...28 in steps of 2 const int ib16 = iqs/2; const float d8 = __low2float(bq8_1[ib16/2].ds); const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); + // (8*ib32 + i)%32 + // (8*ib32 + i)%8: ib32=0->i, ib32=i int ib32 = ib16/2; int is = ib16%2; - int scales; - const uint32_t * scales_l = (const uint32_t *)bq5->scales_l; - const uint32_t * scales_h = (const uint32_t *)bq5->scales_h; - scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020); + int scales[2]; + const uint32_t * scales_l = (const uint32_t *)bq3->scales_l; + const uint32_t * scales_h = (const uint32_t *)bq3->scales_h; + scales[0] = (((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) << 1) | 0x01010101; + scales[1] = (scales_h[is] >> ib32) & 0x01010101; const int8_t * s8 = (const int8_t *)&scales; int2 val1; - const int * q4 = (const int *)bq5->qs + 16*ib32; - const int * qh = (const int *)bq5->qh + 4*ib32; + const int * q2 = (const int *)bq3->qs + 8*ib32; + const int * qh = (const int *)bq3->qh + 4*ib32; int aux32[2]; const uint8_t * aux8 = (const uint8_t *)aux32; for (int i = 0; i < 4; ++i) { - auto values1 = iq5nl_values + (((bq5->extra[i+4*is] >> ib32) & 1) << 5); + auto values1 = iq3nl_values + (((bq3->extra[i+4*is] >> ib32) & 1) << 3); int sumi1 = 0; - aux32[0] = ((q4[i+4*is+0] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+0)) & 0x01010101) << 4); - aux32[1] = ((q4[i+4*is+0] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+1)) & 0x01010101) << 4); + aux32[0] = ((q2[i+4*is] >> 0) & 0x03030303) | (((qh[i] >> (4*is+0)) & 0x01010101) << 2); + aux32[1] = ((q2[i+4*is] >> 2) & 0x03030303) | (((qh[i] >> (4*is+1)) & 0x01010101) << 2); val1.x = int_from_table(aux8+0, (const uint8_t *)values1); val1.y = int_from_table(aux8+4, (const uint8_t *)values1); - sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1)); - aux32[0] = ((q4[i+4*is+8] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+4)) & 0x01010101) << 4); - aux32[1] = ((q4[i+4*is+8] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+5)) & 0x01010101) << 4); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1)); + aux32[0] = ((q2[i+4*is] >> 4) & 0x03030303) | (((qh[i] >> (4*is+2)) & 0x01010101) << 2); + aux32[1] = ((q2[i+4*is] >> 6) & 0x03030303) | (((qh[i] >> (4*is+3)) & 0x01010101) << 2); val1.x = int_from_table(aux8+0, (const uint8_t *)values1); val1.y = int_from_table(aux8+4, (const uint8_t *)values1); - sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); - const float d = __half2float(bq5->d[i]) * d8; - result[i] += d * sumi1 * s8[i]; + sumi1 = ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + const float d = __half2float(bq3->d[i]) * d8; + result[i] += d * sumi1 * s8[i] * (s8[i+4] ? 
-1 : 1); } } From aaf6d347895a1edef6036926e588e3c696172811 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 15:05:02 +0300 Subject: [PATCH 09/11] CUDA: slightly faster iq3_k_r4 GEMV --- ggml/src/ggml-cuda/iqk_mmvq.cu | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 8093428e5..df72e0c20 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -478,41 +478,45 @@ __device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1( const block_iq3_k_r4 * bq3 = (const block_iq3_k_r4 *)vbq + kbx; - // iqs is 0...28 in steps of 2 + // iqs is 0...30 in steps of 2 const int ib16 = iqs/2; const float d8 = __low2float(bq8_1[ib16/2].ds); const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); - // (8*ib32 + i)%32 - // (8*ib32 + i)%8: ib32=0->i, ib32=i int ib32 = ib16/2; int is = ib16%2; int scales[2]; const uint32_t * scales_l = (const uint32_t *)bq3->scales_l; const uint32_t * scales_h = (const uint32_t *)bq3->scales_h; + scales[0] = (((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) << 1) | 0x01010101; scales[1] = (scales_h[is] >> ib32) & 0x01010101; + // This is not faster. Why? + //scales[1] = __vcmpeq4((scales_h[is] >> ib32) & 0x01010101, 0x01010101); + //scales[0] = __vsub4(scales[0] ^ scales[1], scales[1]); const int8_t * s8 = (const int8_t *)&scales; int2 val1; - const int * q2 = (const int *)bq3->qs + 8*ib32; + const int * q2 = (const int *)bq3->qs + 8*ib32 + 4*is; const int * qh = (const int *)bq3->qh + 4*ib32; int aux32[2]; const uint8_t * aux8 = (const uint8_t *)aux32; for (int i = 0; i < 4; ++i) { auto values1 = iq3nl_values + (((bq3->extra[i+4*is] >> ib32) & 1) << 3); int sumi1 = 0; - aux32[0] = ((q2[i+4*is] >> 0) & 0x03030303) | (((qh[i] >> (4*is+0)) & 0x01010101) << 2); - aux32[1] = ((q2[i+4*is] >> 2) & 0x03030303) | (((qh[i] >> (4*is+1)) & 0x01010101) << 2); + int h = qh[i] >> 4*is; + aux32[0] = ((q2[i] >> 0) & 0x03030303) | ((h << 2) & 0x04040404); + aux32[1] = ((q2[i] >> 2) & 0x03030303) | ((h << 1) & 0x04040404); val1.x = int_from_table(aux8+0, (const uint8_t *)values1); val1.y = int_from_table(aux8+4, (const uint8_t *)values1); sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1)); - aux32[0] = ((q2[i+4*is] >> 4) & 0x03030303) | (((qh[i] >> (4*is+2)) & 0x01010101) << 2); - aux32[1] = ((q2[i+4*is] >> 6) & 0x03030303) | (((qh[i] >> (4*is+3)) & 0x01010101) << 2); + aux32[0] = ((q2[i] >> 4) & 0x03030303) | ((h >> 0) & 0x04040404); + aux32[1] = ((q2[i] >> 6) & 0x03030303) | ((h >> 1) & 0x04040404); val1.x = int_from_table(aux8+0, (const uint8_t *)values1); val1.y = int_from_table(aux8+4, (const uint8_t *)values1); sumi1 = ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); const float d = __half2float(bq3->d[i]) * d8; result[i] += d * sumi1 * s8[i] * (s8[i+4] ? 
-1 : 1); + //result[i] += d * sumi1 * s8[i]; } } From fa011c9017630ba46a20c7f91eb67f3cfbffaf50 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 16:13:54 +0300 Subject: [PATCH 10/11] CUDA: iq2_k_r4 GEMV --- ggml/src/ggml-cuda.cu | 1 + ggml/src/ggml-cuda/convert.cu | 61 +++++++++++++++++++++++++++++++++ ggml/src/ggml-cuda/iqk_mmvq.cu | 56 +++++++++++++++++++++++++++++- ggml/src/ggml-cuda/iqk_mmvq.cuh | 5 +++ ggml/src/ggml-cuda/mmvq.cu | 4 +++ 5 files changed, 126 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu index e06a78265..6331bc174 100644 --- a/ggml/src/ggml-cuda.cu +++ b/ggml/src/ggml-cuda.cu @@ -3470,6 +3470,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ1_BN: case GGML_TYPE_IQ2_BN: + case GGML_TYPE_IQ2_K_R4: case GGML_TYPE_IQ3_K_R4: case GGML_TYPE_IQ4_K_R4: case GGML_TYPE_IQ5_K_R4: diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu index a94c00e9b..2ccca01bd 100644 --- a/ggml/src/ggml-cuda/convert.cu +++ b/ggml/src/ggml-cuda/convert.cu @@ -886,6 +886,53 @@ static __global__ void dequantize_block_iq5_k_r4(const void * __restrict__ vx, d } } +template<typename dst_t> +static __global__ void dequantize_block_iq2_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { + + int64_t ii = blockIdx.x; + + int64_t nblock = n_per_row/256; + int64_t row = ii/nblock; + int64_t row4 = row/4; + int64_t ir = row%4; + int64_t ibl = row4*nblock + ii%nblock; + + const int tid = threadIdx.x; + const int il = tid/8; // 0...3 + const int ib = tid%8; // 0...7 + + const block_iq2_k_r4 * x = (const block_iq2_k_r4 *)vx; + dst_t * y = yy + 256*ii + 32*ib; + + const float d = __half2float(x[ibl].d[ir]); + int is = 8*ib + ir; + float dl1 = d * (((x[ibl].scales[is%32] >> 4*(is/32)) & 0xf) - 8); + is += 4; + float dl2 = d * (((x[ibl].scales[is%32] >> 4*(is/32)) & 0xf) - 8); + auto values1 = iq2nl_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 2); + auto values2 = iq2nl_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 2); + auto ql = x[ibl].qs + 32*ib + 4*ir; + if constexpr (std::is_same_v<dst_t, nv_bfloat16>) { + y[il+ 0] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 0) & 3]); + y[il+ 4] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 2) & 3]); + y[il+ 8] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 4) & 3]); + y[il+12] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 6) & 3]); + y[il+16] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 0) & 3]); + y[il+20] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 2) & 3]); + y[il+24] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 4) & 3]); + y[il+28] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 6) & 3]); + } else { + y[il+ 0] = dl1 * values1[(ql[il+ 0] >> 0) & 3]; + y[il+ 4] = dl1 * values1[(ql[il+ 0] >> 2) & 3]; + y[il+ 8] = dl1 * values1[(ql[il+ 0] >> 4) & 3]; + y[il+12] = dl1 * values1[(ql[il+ 0] >> 6) & 3]; + y[il+16] = dl2 * values2[(ql[il+16] >> 0) & 3]; + y[il+20] = dl2 * values2[(ql[il+16] >> 2) & 3]; + y[il+24] = dl2 * values2[(ql[il+16] >> 4) & 3]; + y[il+28] = dl2 * values2[(ql[il+16] >> 6) & 3]; + } +} + template<typename dst_t> static __global__ void dequantize_block_iq3_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) { @@ -1353,6 +1400,14 @@ static void dequantize_row_iq3_k_r4_cuda(const void * vx, dst_t * y, const int64 dequantize_block_iq3_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size); } +template<typename dst_t> +static void dequantize_row_iq2_k_r4_cuda(const void * vx, dst_t * y, const 
int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { + const int64_t k = nrows * n_per_row; + const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_K, n_per_row); + const int nb = (k + QK_K - 1) / QK_K; + dequantize_block_iq2_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size); +} + template<typename dst_t> static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) { const int64_t k = nrows * n_per_row; @@ -1479,6 +1534,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) { return dequantize_row_iq5_k_cuda; case GGML_TYPE_IQ6_K: return dequantize_row_iq6_k_cuda; + case GGML_TYPE_IQ2_K_R4: + return dequantize_row_iq2_k_r4_cuda; case GGML_TYPE_IQ3_K_R4: return dequantize_row_iq3_k_r4_cuda; case GGML_TYPE_IQ4_K_R4: @@ -1567,6 +1624,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) { return convert_unary_cuda; case GGML_TYPE_BF16: return convert_from_bf16_cuda; + case GGML_TYPE_IQ2_K_R4: + return dequantize_row_iq2_k_r4_cuda; case GGML_TYPE_IQ3_K_R4: return dequantize_row_iq3_k_r4_cuda; case GGML_TYPE_IQ4_K_R4: @@ -1652,6 +1711,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) { return convert_unary_cuda; case GGML_TYPE_BF16: return convert_from_bf16_cuda; + case GGML_TYPE_IQ2_K_R4: + return dequantize_row_iq2_k_r4_cuda; case GGML_TYPE_IQ3_K_R4: return dequantize_row_iq3_k_r4_cuda; case GGML_TYPE_IQ4_K_R4: diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index df72e0c20..23f019ec5 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -8,6 +8,13 @@ typedef void (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float *); +template<> +struct ggml_cuda_type_traits<GGML_TYPE_IQ2_K_R4> { + static constexpr int qk = QK_K; + static constexpr int qr = QR4_XS; + static constexpr int qi = QI4_XS; +}; + template<> struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K_R4> { static constexpr int qk = QK_K; @@ -494,7 +501,7 @@ __device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1( // This is not faster. Why? 
//scales[1] = __vcmpeq4((scales_h[is] >> ib32) & 0x01010101, 0x01010101); //scales[0] = __vsub4(scales[0] ^ scales[1], scales[1]); - const int8_t * s8 = (const int8_t *)&scales; + const int8_t * s8 = (const int8_t *)scales; int2 val1; const int * q2 = (const int *)bq3->qs + 8*ib32 + 4*is; const int * qh = (const int *)bq3->qh + 4*ib32; @@ -520,6 +527,45 @@ __device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1( } } +__device__ __forceinline__ void vec_dot_iq2_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { + + const block_iq2_k_r4 * bq2 = (const block_iq2_k_r4 *)vbq + kbx; + + // iqs is 0...30 in steps of 2 + const int ib16 = iqs/2; + const float d8 = __low2float(bq8_1[ib16/2].ds); + const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); + + int ib32 = ib16/2; + int is = ib16%2; + const int * scales_l = (const int *)bq2->scales; + + int scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f), 0x08080808); + const int8_t * s8 = (const int8_t *)&scales; + int2 val1; + const int * q2 = (const int *)bq2->qs + 8*ib32 + 4*is; + int aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; + for (int i = 0; i < 4; ++i) { + auto values1 = iq2nl_values + (((bq2->extra[i+4*is] >> ib32) & 1) << 2); + int sumi1 = 0; + aux32[0] = ((q2[i] >> 0) & 0x03030303); + aux32[1] = ((q2[i] >> 2) & 0x03030303); + // TODO: int_from_table_4 + val1.x = int_from_table(aux8+0, (const uint8_t *)values1); + val1.y = int_from_table(aux8+4, (const uint8_t *)values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1)); + aux32[0] = ((q2[i] >> 4) & 0x03030303); + aux32[1] = ((q2[i] >> 6) & 0x03030303); + val1.x = int_from_table(aux8+0, (const uint8_t *)values1); + val1.y = int_from_table(aux8+4, (const uint8_t *)values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + const float d = __half2float(bq2->d[i]) * d8; + result[i] += d * sumi1 * s8[i]; + } +} + #define VDR_IQ6_K_Q8_1_MMVQ 4 #define VDR_IQ6_K_Q8_1_MMQ 4 @@ -973,6 +1019,14 @@ void mul_mat_vec_iq5_k_r4_q8_1_cuda( iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); } +void mul_mat_vec_iq2_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) { + + iqk_mul_mat_vec_q_cuda(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); +} + void mul_mat_vec_iq3_k_r4_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh index 73c1a9460..228c513b9 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cuh +++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh @@ -61,6 +61,11 @@ void mul_mat_vec_iq2_bn_q8_1_cuda( const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); +void mul_mat_vec_iq2_k_r4_q8_1_cuda( + const void * vx, const void * vy, float * dst, const char * ids_data, + const int ncols_x, const 
int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, + const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream); + void mul_mat_vec_iq3_k_r4_q8_1_cuda( const void * vx, const void * vy, float * dst, const char * ids_data, const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index e1cc9bc02..d7bed266c 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -542,6 +542,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm case GGML_TYPE_IQ3_S: mul_mat_vec_iq3_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); break; + case GGML_TYPE_IQ2_K_R4: + mul_mat_vec_iq2_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); + break; case GGML_TYPE_IQ3_K_R4: mul_mat_vec_iq3_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream); break; @@ -664,6 +667,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) { case GGML_TYPE_IQ5_KS: case GGML_TYPE_IQ6_K: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_K_R4: case GGML_TYPE_IQ3_K_R4: case GGML_TYPE_IQ4_K_R4: case GGML_TYPE_IQ5_K_R4: From 1a8145e48edc222ba599366e29ab6e2b53a5c0f8 Mon Sep 17 00:00:00 2001 From: Iwan Kawrakow Date: Mon, 26 May 2025 16:29:36 +0300 Subject: [PATCH 11/11] CUDA: faster iq2_k_r4 GEMV --- ggml/src/ggml-cuda/iqk_mmvq.cu | 80 +++++++++++++++++----------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu index 23f019ec5..20bacd976 100644 --- a/ggml/src/ggml-cuda/iqk_mmvq.cu +++ b/ggml/src/ggml-cuda/iqk_mmvq.cu @@ -527,45 +527,6 @@ __device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1( } } -__device__ __forceinline__ void vec_dot_iq2_k_r4_q8_1( - const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { - - const block_iq2_k_r4 * bq2 = (const block_iq2_k_r4 *)vbq + kbx; - - // iqs is 0...30 in steps of 2 - const int ib16 = iqs/2; - const float d8 = __low2float(bq8_1[ib16/2].ds); - const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); - - int ib32 = ib16/2; - int is = ib16%2; - const int * scales_l = (const int *)bq2->scales; - - int scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f), 0x08080808); - const int8_t * s8 = (const int8_t *)&scales; - int2 val1; - const int * q2 = (const int *)bq2->qs + 8*ib32 + 4*is; - int aux32[2]; - const uint8_t * aux8 = (const uint8_t *)aux32; - for (int i = 0; i < 4; ++i) { - auto values1 = iq2nl_values + (((bq2->extra[i+4*is] >> ib32) & 1) << 2); - int sumi1 = 0; - aux32[0] = ((q2[i] >> 0) & 0x03030303); - aux32[1] = ((q2[i] >> 2) & 0x03030303); - // TODO: int_from_table_4 - val1.x = int_from_table(aux8+0, (const uint8_t *)values1); - val1.y = int_from_table(aux8+4, (const uint8_t *)values1); - sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1)); - aux32[0] = ((q2[i] >> 4) & 0x03030303); - aux32[1] = ((q2[i] >> 6) & 0x03030303); - val1.x = int_from_table(aux8+0, (const uint8_t *)values1); - val1.y = int_from_table(aux8+4, (const uint8_t *)values1); - sumi1 
= ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); - const float d = __half2float(bq2->d[i]) * d8; - result[i] += d * sumi1 * s8[i]; - } -} - #define VDR_IQ6_K_Q8_1_MMVQ 4 #define VDR_IQ6_K_Q8_1_MMQ 4 @@ -793,6 +754,47 @@ __device__ __forceinline__ void vec_dot_iq2_ks_q8_1( + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4); } +__device__ __forceinline__ void vec_dot_iq2_k_r4_q8_1( + const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) { + + const block_iq2_k_r4 * bq2 = (const block_iq2_k_r4 *)vbq + kbx; + + // iqs is 0...30 in steps of 2 + const int ib16 = iqs/2; + const float d8 = __low2float(bq8_1[ib16/2].ds); + const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2); + + int ib32 = ib16/2; + int is = ib16%2; + const int * scales_l = (const int *)bq2->scales; + + const int * all_values = (const int *)iq2k_table; + + int scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f), 0x08080808); + const int8_t * s8 = (const int8_t *)&scales; + int2 val1; + const int * q2 = (const int *)bq2->qs + 8*ib32 + 4*is; + int aux32[2]; + const uint8_t * aux8 = (const uint8_t *)aux32; +#pragma unroll + for (int i = 0; i < 4; ++i) { + auto values1 = all_values + (((bq2->extra[i+4*is] >> ib32) & 1) << 8); + int sumi1 = 0; + aux32[0] = ((q2[i] >> 0) & 0x03030303); + aux32[1] = ((q2[i] >> 2) & 0x03030303); + val1.x = int_from_table_4(aux8+0, values1); + val1.y = int_from_table_4(aux8+4, values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1)); + aux32[0] = ((q2[i] >> 4) & 0x03030303); + aux32[1] = ((q2[i] >> 6) & 0x03030303); + val1.x = int_from_table_4(aux8+0, values1); + val1.y = int_from_table_4(aux8+4, values1); + sumi1 = ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1)); + const float d = __half2float(bq2->d[i]) * d8; + result[i] += d * sumi1 * s8[i]; + } +} + #define VDR_IQ3_K_Q8_1_MMVQ 4 #define VDR_IQ3_K_Q8_1_MMQ 4
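
A closing note on the four *_r4 dequantize kernels added in this series: they all share the same blockIdx.x -> (row, super-block) arithmetic, and it is easy to mis-read. The host-side C++ sketch below is not part of the patches; the sizes are arbitrary examples, and QK_K = 256 is taken from ggml. It checks that writing to yy + 256*ii is exactly the row-major offset of super-block ii%nblock in row 4*row4 + ir:

#include <cassert>
#include <cstdint>

int main() {
    const int64_t QK_K = 256;                      // super-block size, as in ggml
    const int64_t n_per_row = 4096, nrows = 8;     // example sizes; n_per_row % QK_K == 0, nrows % 4 == 0
    const int64_t nblock = n_per_row / QK_K;       // super-blocks per row
    for (int64_t ii = 0; ii < nrows * nblock; ++ii) {       // ii plays the role of blockIdx.x
        const int64_t row  = ii / nblock;                   // output row written by this CUDA block
        const int64_t row4 = row / 4;                       // which group of 4 interleaved rows
        const int64_t ir   = row % 4;                       // lane of this row inside the group
        const int64_t ibl  = row4 * nblock + ii % nblock;   // block_*_r4 super-block to read
        // Destination check: yy + QK_K*ii must equal the row-major offset of
        // super-block ii % nblock in row 4*row4 + ir.
        assert(QK_K * ii == (4 * row4 + ir) * n_per_row + (ii % nblock) * QK_K);
        (void)ibl;  // ibl and ir select the source: lane ir of the group's shared super-block
    }
    return 0;
}

Since one block_*_r4 stores the same 256 columns of 4 consecutive rows, four consecutive values of row revisit the same range of ibl with ir incremented, which is why the kernels above index the scales, extra bits, and quants by both ibl and ir.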