@@ -49,13 +49,6 @@ typedef struct {
 } block_q4_1;
 static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
 
-#define QK4_2 16
-typedef struct {
-    half d;                 // delta
-    uint8_t qs[QK4_2 / 2];  // nibbles / quants
-} block_q4_2;
-static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");
-
 #define QK5_0 32
 typedef struct {
     half d;                 // delta
@@ -127,29 +120,6 @@ static __global__ void dequantize_block_q4_1(const void * vx, float * y) {
     }
 }
 
-static __global__ void dequantize_block_q4_2(const void * vx, float * y) {
-    const block_q4_2 * x = (const block_q4_2 *) vx;
-
-    const int i = blockIdx.x;
-
-    const float d = x[i].d;
-
-    const uint8_t * pp = x[i].qs;
-
-    for (int l = 0; l < QK4_2; l += 2) {
-        const uint8_t vi = pp[l/2];
-
-        const int8_t vi0 = vi & 0xf;
-        const int8_t vi1 = vi >> 4;
-
-        const float v0 = (vi0 - 8)*d;
-        const float v1 = (vi1 - 8)*d;
-
-        y[i*QK4_2 + l + 0] = v0;
-        y[i*QK4_2 + l + 1] = v1;
-    }
-}
-
 static __global__ void dequantize_block_q5_0(const void * vx, float * y) {
     const block_q5_0 * x = (const block_q5_0 *) vx;
@@ -235,11 +205,6 @@ static void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStre
     dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
 }
 
-static void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
-    const int nb = k / QK4_2;
-    dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
-}
-
 static void dequantize_row_q5_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
     const int nb = k / QK5_0;
     dequantize_block_q5_0<<<nb, 1, 0, stream>>>(vx, y);
@@ -274,8 +239,6 @@ static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_q4_0_cuda;
        case GGML_TYPE_Q4_1:
            return dequantize_row_q4_1_cuda;
-       case GGML_TYPE_Q4_2:
-           return dequantize_row_q4_2_cuda;
        case GGML_TYPE_Q5_0:
            return dequantize_row_q5_0_cuda;
        case GGML_TYPE_Q5_1:
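
For reference, the wrappers that remain after this change (dequantize_row_q4_1_cuda, dequantize_row_q5_0_cuda, ...) all share the launch pattern visible in the hunks above: a row of k values is split into k / QK* quantized blocks and the kernel is launched with one single-thread CUDA block per quantized block, indexed through blockIdx.x. Below is a minimal standalone sketch of that pattern, not code from this commit; the block_q4_1 field layout (float d, float m, 16 packed nibbles) and the q*d + m dequantization formula are assumptions consistent with the static_assert in the first hunk, and the *_sketch names are hypothetical.

// Standalone sketch of the dequantize launch pattern (assumptions noted above).
#include <cstdint>
#include <cuda_runtime.h>

#define QK4_1 32

typedef struct {
    float   d;              // delta (scale) -- assumed layout
    float   m;              // min           -- assumed layout
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1_sketch;

static __global__ void dequantize_block_q4_1_sketch(const void * vx, float * y) {
    const block_q4_1_sketch * x = (const block_q4_1_sketch *) vx;

    const int i = blockIdx.x;          // one quantized block per CUDA block

    const float d = x[i].d;
    const float m = x[i].m;

    const uint8_t * pp = x[i].qs;

    for (int l = 0; l < QK4_1; l += 2) {
        const uint8_t vi = pp[l/2];    // two 4-bit quants per byte

        y[i*QK4_1 + l + 0] = (vi & 0xf)*d + m;
        y[i*QK4_1 + l + 1] = (vi >>  4)*d + m;
    }
}

// Mirrors the remaining dequantize_row_*_cuda wrappers: k values -> k/QK4_1 blocks.
static void dequantize_row_q4_1_sketch_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK4_1;
    dequantize_block_q4_1_sketch<<<nb, 1, 0, stream>>>(vx, y);
}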