Skip to content

Commit 51d0b49

Browse files
committed
Set up the same GitHub-hosted runner configuration for the ARM64 CI jobs;
fix test-quantize-perf just like #12306
1 parent 2ce2bd5 commit 51d0b49

File tree

2 files changed: +29 additions, −14 deletions

.github/workflows/build.yml

Lines changed: 28 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1276,17 +1276,29 @@ jobs:
12761276
LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
12771277
12781278
ggml-ci-arm64-cpu-low-perf:
1279-
runs-on: [self-hosted, Linux, ARM64, CPU, low-perf]
1279+
runs-on: ubuntu-22.04-arm
12801280

12811281
steps:
12821282
- name: Clone
12831283
id: checkout
12841284
uses: actions/checkout@v4
12851285

1286+
- name: ccache
1287+
uses: ggml-org/[email protected]
1288+
with:
1289+
key: ggml-ci-arm64-cpu-low-perf
1290+
evict-old-files: 1d
1291+
1292+
- name: Dependencies
1293+
id: depends
1294+
run: |
1295+
sudo apt-get update
1296+
sudo apt-get install build-essential libcurl4-openssl-dev
1297+
12861298
- name: Test
12871299
id: ggml-ci
12881300
run: |
1289-
bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1301+
LLAMA_ARG_THREADS=$(nproc) GG_BUILD_LOW_PERF=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
12901302
12911303
ggml-ci-x64-cpu-high-perf:
12921304
runs-on: ubuntu-22.04
@@ -1314,17 +1326,29 @@ jobs:
13141326
LLAMA_ARG_THREADS=$(nproc) bash ./ci/run.sh ./tmp/results ./tmp/mnt
13151327
13161328
ggml-ci-arm64-cpu-high-perf:
1317-
runs-on: [self-hosted, Linux, ARM64, CPU, high-perf]
1329+
runs-on: ubuntu-22.04-arm
13181330

13191331
steps:
13201332
- name: Clone
13211333
id: checkout
13221334
uses: actions/checkout@v4
13231335

1336+
- name: ccache
1337+
uses: ggml-org/[email protected]
1338+
with:
1339+
key: ggml-ci-arm64-cpu-high-perf
1340+
evict-old-files: 1d
1341+
1342+
- name: Dependencies
1343+
id: depends
1344+
run: |
1345+
sudo apt-get update
1346+
sudo apt-get install build-essential libcurl4-openssl-dev
1347+
13241348
- name: Test
13251349
id: ggml-ci
13261350
run: |
1327-
GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ~/results/llama.cpp /mnt/llama.cpp
1351+
LLAMA_ARG_THREADS=$(nproc) GG_BUILD_NO_BF16=1 GG_BUILD_EXTRA_TESTS_0=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt
13281352
13291353
ggml-ci-x64-nvidia-v100-cuda:
13301354
runs-on: [self-hosted, Linux, X64, NVIDIA, V100]

tests/test-quantize-perf.cpp

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -260,14 +260,7 @@ int main(int argc, char * argv[]) {
260260

261261
int64_t iterations = params.iterations;
262262

263-
264-
// Initialize GGML, ensures float conversion tables are initialized
265-
struct ggml_init_params ggml_params = {
266-
/* .mem_size = */ 1*1024,
267-
/* .mem_buffer = */ NULL,
268-
/* .no_alloc = */ true,
269-
};
270-
struct ggml_context * ctx = ggml_init(ggml_params);
263+
ggml_cpu_init();
271264

272265
for (int i = 0; i < GGML_TYPE_COUNT; i++) {
273266
ggml_type type = (ggml_type) i;
@@ -359,7 +352,5 @@ int main(int argc, char * argv[]) {
359352
}
360353
}
361354

362-
ggml_free(ctx);
363-
364355
return 0;
365356
}

0 commit comments

Comments (0)