
Commit 2489ae6

netrunnereve authored and arthw committed
Q6_K AVX improvements (ggml-org#10118)
* q6_k instruction reordering attempt
* better subtract method
* should be theoretically faster
  small improvement with shuffle lut, likely because all loads are already done at that stage
* optimize bit fiddling
* handle -32 offset separately. bsums exists for a reason!
* use shift
* Update ggml-quants.c
* have to update ci macos version to 13 as 12 doesnt work now. 13 is still x86
1 parent b579fd6 commit 2489ae6
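The core idea behind "handle -32 offset separately" is algebraic: every q6_K value carries a constant -32 offset, and block_q8_K already stores per-16-element sums of the activations (bsums), so the offset's contribution can be folded in once per scale group instead of being subtracted element by element. Below is a minimal scalar sketch of that identity, not the vectorized kernel from the diff; the helper name dot_q6_q8_block and the assumption that q6[] already holds the unpacked 6-bit values (0..63) are illustrative only.

#include <stdint.h>

// Hypothetical scalar illustration of the bsums trick:
//   sum_g scale[g] * sum_k (q6[k] - 32) * q8[k]
// = sum_g scale[g] * sum_k q6[k] * q8[k]  -  32 * sum_g scale[g] * bsums[g]
static int32_t dot_q6_q8_block(const uint8_t *q6, const int8_t *q8,
                               const int8_t *scales, const int16_t *bsums,
                               int ngroups /* QK_K/16 == 16 for q6_K */) {
    int32_t sum = 0;
    for (int g = 0; g < ngroups; ++g) {
        int32_t p = 0;
        for (int k = 0; k < 16; ++k) {
            p += (int32_t)q6[g*16 + k] * q8[g*16 + k];  // no per-element -32 here
        }
        sum += (int32_t)scales[g] * p;
        sum -= 32 * (int32_t)scales[g] * bsums[g];      // offset handled once via bsums
    }
    return sum;                                         // caller applies the per-block float scale
}

In the AVX path below, this correction is the q8sclsub_0/q8sclsub_1 pair computed up front: _mm_madd_epi16(q8sums, scales_16) forms scale*bsum, the multiply by 32 becomes _mm_slli_epi32(..., 5) ("use shift" in the commit message), and the subtraction is applied once to sumi_0/sumi_1 after the loop.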

File tree

2 files changed: +38 -51 lines changed


.github/workflows/build.yml

Lines changed: 1 addition & 1 deletion
@@ -92,7 +92,7 @@ jobs:
           name: llama-bin-macos-arm64.zip
 
   macOS-latest-cmake-x64:
-    runs-on: macos-12
+    runs-on: macos-13
 
     steps:
       - name: Clone

ggml/src/ggml-quants.c

Lines changed: 37 additions & 50 deletions
@@ -9104,10 +9104,8 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
 #elif defined __AVX__
 
-    const __m128i m4 = _mm_set1_epi8(0xF);
     const __m128i m3 = _mm_set1_epi8(3);
-    const __m128i m32s = _mm_set1_epi8(32);
-    const __m128i m2 = _mm_set1_epi8(2);
+    const __m128i m15 = _mm_set1_epi8(15);
 
     __m256 acc = _mm256_setzero_ps();
 
@@ -9119,39 +9117,47 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
         const uint8_t * restrict qh = x[i].qh;
         const int8_t * restrict q8 = y[i].qs;
 
+        // handle the q6_k -32 offset separately using bsums
+        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)y[i].bsums);
+        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)y[i].bsums + 1);
         const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+        const __m128i scales_16_0 = _mm_cvtepi8_epi16(scales);
+        const __m128i scales_16_1 = _mm_cvtepi8_epi16(_mm_bsrli_si128(scales, 8));
+        const __m128i q8sclsub_0 = _mm_slli_epi32(_mm_madd_epi16(q8sums_0, scales_16_0), 5);
+        const __m128i q8sclsub_1 = _mm_slli_epi32(_mm_madd_epi16(q8sums_1, scales_16_1), 5);
 
         __m128i sumi_0 = _mm_setzero_si128();
         __m128i sumi_1 = _mm_setzero_si128();
 
-        __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
+        int is = 0;
+
         for (int j = 0; j < QK_K/128; ++j) {
 
             const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
             const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
 
             const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
             const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
-            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
-            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
-            const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
-            const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
-            const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
-            const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
+            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(12)), 2);
+            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(12)), 2);
+            const __m128i q4h_4 = _mm_and_si128(q4bitsH_0, _mm_set1_epi8(48));
+            const __m128i q4h_5 = _mm_and_si128(q4bitsH_1, _mm_set1_epi8(48));
+            const __m128i q4h_6 = _mm_srli_epi16(_mm_and_si128(q4bitsH_0, _mm_set1_epi8(-64)), 2);
+            const __m128i q4h_7 = _mm_srli_epi16(_mm_and_si128(q4bitsH_1, _mm_set1_epi8(-64)), 2);
 
             const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
             const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
             const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
             const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
 
-            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
-            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
-            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
-            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
-            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
-            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
-            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
-            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
+            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m15), q4h_0);
+            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m15), q4h_1);
+            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m15), q4h_2);
+            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m15), q4h_3);
+            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m15), q4h_4);
+            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m15), q4h_5);
+            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m15), q4h_6);
+            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m15), q4h_7);
 
             const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
             const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
@@ -9162,15 +9168,6 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
             const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
             const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
 
-            __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
-            __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
-            __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
-            __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
-            __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
-            __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
-            __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
-            __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
-
             __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
             __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
             __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
@@ -9180,32 +9177,20 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
             __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
             __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
 
-            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
-            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
-            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
-            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
-            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
-            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
-            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
-            p16_7 = _mm_sub_epi16(p16_7, q8s_7);
-
-            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
-            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
-            const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
-            const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
-            shuffle = _mm_add_epi8(shuffle, m2);
+            const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
+            const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
+            const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
+            const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
+            is += 4;
 
             p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
-            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
+            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_0, 8)), p16_1);
             p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
-            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
+            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_1, 8)), p16_3);
             p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
-            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
+            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_2, 8)), p16_5);
             p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
-            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
+            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_bsrli_si128(scale_3, 8)), p16_7);
 
             sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
             sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
@@ -9214,8 +9199,10 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r
 
         }
 
-        __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
-        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
+        sumi_0 = _mm_sub_epi32(sumi_0, q8sclsub_0);
+        sumi_1 = _mm_sub_epi32(sumi_1, q8sclsub_1);
+        const __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc);
     }
 
     *s = hsum_float_8(acc);
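The "optimize bit fiddling" part concerns the upper two bits of each q6_K value, which live in qh. The old code extracted each 2-bit pair with shift-right, mask with m3, then shift-left by 4; the new code masks the pair in place (byte constants 12, 48, and -64, i.e. 0x0C, 0x30, 0xC0 as signed chars) and applies at most one net shift, since the destination is always bit positions 4-5. Below is a scalar sanity check of that equivalence; it is a standalone illustrative program, not ggml code.

#include <assert.h>

// Illustrative only: both the old shift-mask-shift form and the new
// mask-then-net-shift form place each 2-bit pair of the high-bits byte h
// at bit positions 4-5 before it is OR-ed with the low nibble.
int main(void) {
    for (int h = 0; h < 256; ++h) {
        assert((((h >> 2) & 3) << 4) == ((h & 0x0C) << 2)); // bits 2-3: net shift left 2
        assert((((h >> 4) & 3) << 4) == (h & 0x30));        // bits 4-5: already in place
        assert((((h >> 6) & 3) << 4) == ((h & 0xC0) >> 2)); // bits 6-7: net shift right 2
    }
    return 0;
}

The remaining changes are smaller reorderings visible in the diff: the per-iteration shuffle-index arithmetic (_mm_add_epi8(shuffle, m2)) is replaced by indexed lookups through get_scale_shuffle(is), and _mm_unpackhi_epi64(x, x) by _mm_bsrli_si128(x, 8), which is interchangeable here because only the low eight bytes feed _mm_cvtepi8_epi16.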
