diff --git a/Makefile b/Makefile index 6d89401c8a76e..823288c27e8b0 100644 --- a/Makefile +++ b/Makefile @@ -210,7 +210,7 @@ libllama.so: llama.o ggml.o $(OBJS) # Tests # -benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o $(OBJS) +benchmark-matmult: examples/benchmark/benchmark-matmult.cpp ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) ./$@ diff --git a/examples/benchmark/benchmark-matmult.cpp b/examples/benchmark/benchmark-matmult.cpp index 19cbab1c38825..83f01d5f09939 100644 --- a/examples/benchmark/benchmark-matmult.cpp +++ b/examples/benchmark/benchmark-matmult.cpp @@ -1,5 +1,6 @@ #include #include "ggml.h" +#include "llama.h" #include #include #include @@ -161,12 +162,40 @@ int main(int argc, char ** argv) { struct ggml_cgraph gf = ggml_build_forward(m11xm2); gf.n_threads=benchmark_params.n_threads; - printf("cgraph->n_threads=%i\n",gf.n_threads); + fprintf(stderr, "system_info: n_threads = %d | %s\n", + benchmark_params.n_threads, llama_print_system_info()); TENSOR_DUMP(m11); TENSOR_DUMP(m2); ggml_graph_compute(ctx, &gf); + { + const int dimx = sizex; + const int dimy = sizey; + const int dimz = sizez; + long long int flops_per_dot_product = dimy + dimy; + long long int flops_per_matrix = flops_per_dot_product * dimx * dimz; ; + printf("Matrix Multiplication of (%i,%i,%i) x (%i,%i,%i) - about %6.2f gFLOPS\n\n", sizex, sizey, 1, sizex, sizez, 1, 1.0f*flops_per_matrix / 1000 / 1000 / 1000); + + printf("Iteration;NThreads; SizeX; SizeY; SizeZ; Required_FLOPS; Elapsed_u_Seconds; FLOPS_per_u_Second\n"); + printf("==============================================================================================\n"); + + for (int i=0;i #include +#if defined(__AVX2__) +#include +#endif + // if C99 - static_assert is noop // ref: https://stackoverflow.com/a/53923785/4039976 #ifndef static_assert @@ -95,7 +99,7 @@ typedef void* thread_ret_t; #define static_assert(cond, msg) _Static_assert(cond, msg) #endif -/*#define GGML_PERF*/ +// 
// Tall-and-skinny matmul kernel (AVX2/AVX-512 + FMA), originally contributed
// by GH user jon-chuang.
//
// A is a K x M matrix and B is a K x N matrix, both row-major.  The caller
// passes A already offset to column i (base + i); this routine computes row i
// of C = A^T * B and writes its N elements contiguously into C.
#if (__AVX2__ || __AVX512F__) && __FMA__
void ggml_mul_row_f32_tall_skinny(const float * A, const float * B, float * C, int M, int N, int K) {
    // Columns handled eight at a time; the tail (N % 8) is done with a mask.
    // NOTE: looping while j < N would overrun B and C when N is not a
    // multiple of 8, so only full 8-wide chunks are processed here.
    const int n_full = N & ~7;

    for (int j = 0; j < n_full; j += 8) {
        __m256 c_vec = _mm256_setzero_ps();
        for (int k = 0; k < K; ++k) {
            __m256 a_vec = _mm256_broadcast_ss(&A[k * M]);  // element k of column i of A
            // loadu: &B[j + k*N] is not 32-byte aligned for general N.
            __m256 b_vec = _mm256_loadu_ps(&B[j + k * N]);
            c_vec = _mm256_fmadd_ps(a_vec, b_vec, c_vec);   // c += a * b
        }
        _mm256_storeu_ps(&C[j], c_vec);
    }

    // Masked tail: lane l participates iff l < remainder (maskload/maskstore
    // use the sign bit of each 32-bit lane, so the mask must be per-lane).
    const int remainder = N & 7;
    if (remainder > 0) {
        const int j = n_full;
        const __m256i mask_vec = _mm256_cmpgt_epi32(_mm256_set1_epi32(remainder),
                                                    _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7));

        __m256 c_vec = _mm256_setzero_ps();
        for (int k = 0; k < K; ++k) {
            __m256 a_vec = _mm256_broadcast_ss(&A[k * M]);
            __m256 b_vec = _mm256_maskload_ps(&B[j + k * N], mask_vec);  // inactive lanes read as 0
            c_vec = _mm256_fmadd_ps(a_vec, b_vec, c_vec);
        }
        _mm256_maskstore_ps(&C[j], mask_vec, c_vec);
    }
}
#endif
= A^TB +void ggml_mul_row_f32_tall_skinny(const float * A, const float * B, float * C, int M, int N, int K) { + for (int j = 0; j < N; j += 4) { // Process 4 elements of C's row at a time - 128 / size_of(float) + __m128 c_vec = _mm_setzero_ps(); // Initialize the result vector to all zeros + + for (int k = 0; k < K; ++k) { + __m128 a = _mm_broadcast_ss(&A[k * M]); // Broadcast the k-th element of the row of A^T + __m128 b_vec = _mm_load_ps(&B[j + k * N]); // Load the j/4-th segment of the k-th row of B^T (corresponding to the k-th column of B) + c_vec = _mm_fmadd_ps(a, b_vec, c_vec); // FMA: c_vec += a * b_vec + } + + // Store the result in the corresponding row of C + _mm_store_ps(&C[j], c_vec); + } + + // Handle the remainder + const int64_t remainder = N & (4l - 1); + if (remainder > 0) { + int j = N - remainder; + __m128i mask_vec = _mm_set1_epi32(0xffffffff << (4l - remainder)); + + __m128 c_vec = _mm_setzero_ps(); // Initialize the result vector to all zeros + + for (int k = 0; k < K; ++k) { + __m128 a = _mm_broadcast_ss(&A[k * M]); // Broadcast the k-th element of the row of A^T + __m128 b_vec = _mm_maskload_ps(&B[j + k * N], mask_vec); // Load the j/4-th segment of the k-th row of B^T (corresponding to the k-th column of B) + c_vec = _mm_fmadd_ps(a, b_vec, c_vec); // FMA: c_vec += a * b_vec + } + + // Store the result in the corresponding offset of C + _mm_maskstore_ps(&C[j], mask_vec, c_vec); + } +} +#endif + +// AVX routines provided by GH user Const-me +// ref: https://github.com/ggerganov/ggml/pull/27#issuecomment-1464934600 #if __AVX2__ || __AVX512F__ // spread 32 bits to 32 bytes { 0x00, 0xFF } static inline __m256i bytes_from_bits_32(const uint8_t * x) { @@ -8042,6 +8123,35 @@ static void ggml_compute_forward_mul_mat_f32( assert(ne2 == ne02); assert(ne3 == ne03); +#if (__AVX512F__ || __AVX2__ || __AVX__) && __FMA__ + // Handle tall and skinny matrices + if ((ggml_cpu_has_avx2() && ne00 <= 48) || ne00 <= 32) { + if (params->type == GGML_TASK_INIT || 
params->type == GGML_TASK_FINALIZE) { + return; + } + // total rows in src0 + const int nr = ne01*ne02*ne03; + + // rows per thread + const int dr = (nr + nth - 1)/nth; + + // row range for this thread + const int ir0 = dr*ith; + const int ir1 = MIN(ir0 + dr, nr); + + // fprintf(stderr, "TS_MUL_MAT: %i %i %i, ir0: %d, ir1: %d, ID: %d\n", ne00, ne11, ne01, ir0, ir1, thrd_current()); + for (int i = ir0; i < ir1; ++i) { + // M = ne01, N = ne11, K = ne00 + ggml_mul_row_f32_tall_skinny( + (float *) src0->data + i, + (float *) src1->data, + (float *) dst->data + i*ne01, + ne01, ne11, ne00); + } + return; + } +#endif + // nb01 >= nb00 - src0 is not transposed // compute by src0 rows