Skip to content

Commit 2657f7b

Browse files
committed
fix more -Wextra-semi-stmt warnings
1 parent ec6629f commit 2657f7b

File tree

8 files changed

+65
-65
lines changed

8 files changed

+65
-65
lines changed

common/log.h

Lines changed: 37 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -225,31 +225,31 @@ enum LogTriState
225225
// USE LOG() INSTEAD
226226
//
227227
#ifndef _MSC_VER
228-
#define LOG_IMPL(str, ...) \
229-
{ \
228+
#define LOG_IMPL(str, ...) \
229+
do { \
230230
if (LOG_TARGET != nullptr) \
231231
{ \
232232
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL, __VA_ARGS__); \
233233
fflush(LOG_TARGET); \
234234
} \
235-
}
235+
} while (0)
236236
#else
237-
#define LOG_IMPL(str, ...) \
238-
{ \
237+
#define LOG_IMPL(str, ...) \
238+
do { \
239239
if (LOG_TARGET != nullptr) \
240240
{ \
241241
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL "", ##__VA_ARGS__); \
242242
fflush(LOG_TARGET); \
243243
} \
244-
}
244+
} while (0)
245245
#endif
246246

247247
// INTERNAL, DO NOT USE
248248
// USE LOG_TEE() INSTEAD
249249
//
250250
#ifndef _MSC_VER
251-
#define LOG_TEE_IMPL(str, ...) \
252-
{ \
251+
#define LOG_TEE_IMPL(str, ...) \
252+
do { \
253253
if (LOG_TARGET != nullptr) \
254254
{ \
255255
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL, __VA_ARGS__); \
@@ -260,10 +260,10 @@ enum LogTriState
260260
fprintf(LOG_TEE_TARGET, LOG_TEE_TIMESTAMP_FMT LOG_TEE_FLF_FMT str "%s" LOG_TEE_TIMESTAMP_VAL LOG_TEE_FLF_VAL, __VA_ARGS__); \
261261
fflush(LOG_TEE_TARGET); \
262262
} \
263-
}
263+
} while (0)
264264
#else
265-
#define LOG_TEE_IMPL(str, ...) \
266-
{ \
265+
#define LOG_TEE_IMPL(str, ...) \
266+
do { \
267267
if (LOG_TARGET != nullptr) \
268268
{ \
269269
fprintf(LOG_TARGET, LOG_TIMESTAMP_FMT LOG_FLF_FMT str "%s" LOG_TIMESTAMP_VAL LOG_FLF_VAL "", ##__VA_ARGS__); \
@@ -274,7 +274,7 @@ enum LogTriState
274274
fprintf(LOG_TEE_TARGET, LOG_TEE_TIMESTAMP_FMT LOG_TEE_FLF_FMT str "%s" LOG_TEE_TIMESTAMP_VAL LOG_TEE_FLF_VAL "", ##__VA_ARGS__); \
275275
fflush(LOG_TEE_TARGET); \
276276
} \
277-
}
277+
} while (0)
278278
#endif
279279

280280
// The '\0' as a last argument, is a trick to bypass the silly
@@ -435,41 +435,41 @@ inline FILE *log_handler() { return log_handler1_impl(); }
435435
inline void log_test()
436436
{
437437
log_disable();
438-
LOG("01 Hello World to nobody, because logs are disabled!\n")
438+
LOG("01 Hello World to nobody, because logs are disabled!\n");
439439
log_enable();
440-
LOG("02 Hello World to default output, which is \"%s\" ( Yaaay, arguments! )!\n", LOG_STRINGIZE(LOG_TARGET))
441-
LOG_TEE("03 Hello World to **both** default output and " LOG_TEE_TARGET_STRING "!\n")
440+
LOG("02 Hello World to default output, which is \"%s\" ( Yaaay, arguments! )!\n", LOG_STRINGIZE(LOG_TARGET));
441+
LOG_TEE("03 Hello World to **both** default output and " LOG_TEE_TARGET_STRING "!\n");
442442
log_set_target(stderr);
443-
LOG("04 Hello World to stderr!\n")
444-
LOG_TEE("05 Hello World TEE with double printing to stderr prevented!\n")
443+
LOG("04 Hello World to stderr!\n");
444+
LOG_TEE("05 Hello World TEE with double printing to stderr prevented!\n");
445445
log_set_target(LOG_DEFAULT_FILE_NAME);
446-
LOG("06 Hello World to default log file!\n")
446+
LOG("06 Hello World to default log file!\n");
447447
log_set_target(stdout);
448-
LOG("07 Hello World to stdout!\n")
448+
LOG("07 Hello World to stdout!\n");
449449
log_set_target(LOG_DEFAULT_FILE_NAME);
450-
LOG("08 Hello World to default log file again!\n")
450+
LOG("08 Hello World to default log file again!\n");
451451
log_disable();
452-
LOG("09 Hello World _1_ into the void!\n")
452+
LOG("09 Hello World _1_ into the void!\n");
453453
log_enable();
454-
LOG("10 Hello World back from the void ( you should not see _1_ in the log or the output )!\n")
454+
LOG("10 Hello World back from the void ( you should not see _1_ in the log or the output )!\n");
455455
log_disable();
456456
log_set_target("llama.anotherlog.log");
457-
LOG("11 Hello World _2_ to nobody, new target was selected but logs are still disabled!\n")
457+
LOG("11 Hello World _2_ to nobody, new target was selected but logs are still disabled!\n");
458458
log_enable();
459-
LOG("12 Hello World this time in a new file ( you should not see _2_ in the log or the output )?\n")
459+
LOG("12 Hello World this time in a new file ( you should not see _2_ in the log or the output )?\n");
460460
log_set_target("llama.yetanotherlog.log");
461-
LOG("13 Hello World this time in yet new file?\n")
461+
LOG("13 Hello World this time in yet new file?\n");
462462
log_set_target(log_filename_generator("llama_autonamed", "log"));
463-
LOG("14 Hello World in log with generated filename!\n")
463+
LOG("14 Hello World in log with generated filename!\n");
464464
#ifdef _MSC_VER
465-
LOG_TEE("15 Hello msvc TEE without arguments\n")
466-
LOG_TEE("16 Hello msvc TEE with (%d)(%s) arguments\n", 1, "test")
467-
LOG_TEELN("17 Hello msvc TEELN without arguments\n")
468-
LOG_TEELN("18 Hello msvc TEELN with (%d)(%s) arguments\n", 1, "test")
469-
LOG("19 Hello msvc LOG without arguments\n")
470-
LOG("20 Hello msvc LOG with (%d)(%s) arguments\n", 1, "test")
471-
LOGLN("21 Hello msvc LOGLN without arguments\n")
472-
LOGLN("22 Hello msvc LOGLN with (%d)(%s) arguments\n", 1, "test")
465+
LOG_TEE("15 Hello msvc TEE without arguments\n");
466+
LOG_TEE("16 Hello msvc TEE with (%d)(%s) arguments\n", 1, "test");
467+
LOG_TEELN("17 Hello msvc TEELN without arguments\n");
468+
LOG_TEELN("18 Hello msvc TEELN with (%d)(%s) arguments\n", 1, "test");
469+
LOG("19 Hello msvc LOG without arguments\n");
470+
LOG("20 Hello msvc LOG with (%d)(%s) arguments\n", 1, "test");
471+
LOGLN("21 Hello msvc LOGLN without arguments\n");
472+
LOGLN("22 Hello msvc LOGLN with (%d)(%s) arguments\n", 1, "test");
473473
#endif
474474
}
475475

@@ -542,7 +542,7 @@ inline void log_dump_cmdline_impl(int argc, char **argv)
542542
buf << " " << argv[i];
543543
}
544544
}
545-
LOGLN("Cmd:%s", buf.str().c_str())
545+
LOGLN("Cmd:%s", buf.str().c_str());
546546
}
547547

548548
#define log_tostr(var) log_var_to_string_impl(var).c_str()
@@ -620,10 +620,10 @@ inline std::string log_var_to_string_impl(const std::vector<int> & var)
620620
#define LOGLN(...) // dummy stub
621621

622622
#undef LOG_TEE
623-
#define LOG_TEE(...) fprintf(stderr, __VA_ARGS__); // convert to normal fprintf
623+
#define LOG_TEE(...) fprintf(stderr, __VA_ARGS__) // convert to normal fprintf
624624

625625
#undef LOG_TEELN
626-
#define LOG_TEELN(...) fprintf(stderr, __VA_ARGS__); // convert to normal fprintf
626+
#define LOG_TEELN(...) fprintf(stderr, __VA_ARGS__) // convert to normal fprintf
627627

628628
#undef LOG_DISABLE
629629
#define LOG_DISABLE() // dummy stub

examples/baby-llama/baby-llama.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ static struct ggml_tensor * randomize_tensor(
8888
break;
8989
default:
9090
assert(false);
91-
};
91+
}
9292

9393
return tensor;
9494
}
@@ -136,7 +136,7 @@ static struct ggml_tensor * randomize_tensor_normal(
136136
break;
137137
default:
138138
assert(false);
139-
};
139+
}
140140

141141
return tensor;
142142
}

examples/main/main.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -855,7 +855,7 @@ int main(int argc, char ** argv) {
855855
llama_backend_free();
856856

857857
#ifndef LOG_DISABLE_LOGS
858-
LOG_TEE("Log end\n")
858+
LOG_TEE("Log end\n");
859859
#endif // LOG_DISABLE_LOGS
860860

861861
return 0;

examples/train-text-from-scratch/train-text-from-scratch.cpp

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ struct ggml_tensor * randomize_tensor_normal(struct ggml_tensor * tensor, struct
107107
break;
108108
default:
109109
assert(false);
110-
};
110+
}
111111
return tensor;
112112
}
113113

@@ -151,7 +151,7 @@ struct ggml_tensor * randomize_tensor_uniform(struct ggml_tensor * tensor, struc
151151
break;
152152
default:
153153
assert(false);
154-
};
154+
}
155155
return tensor;
156156
}
157157

@@ -1015,7 +1015,7 @@ void shuffle_ints(int * begin, int * end) {
10151015
}
10161016

10171017
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
1018-
{ \
1018+
do { \
10191019
const std::string skey(key); \
10201020
const int kid = gguf_find_key(ctx, skey.c_str()); \
10211021
if (kid >= 0) { \
@@ -1027,7 +1027,7 @@ void shuffle_ints(int * begin, int * end) {
10271027
} else if (req) { \
10281028
die_fmt("key not found in model: %s", skey.c_str()); \
10291029
} \
1030-
}
1030+
} while (0)
10311031

10321032

10331033
bool are_same_layout(struct ggml_tensor * a, struct ggml_tensor * b) {

ggml.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1863,7 +1863,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
18631863
#define GGML_F16x8_ADD vaddq_f16
18641864
#define GGML_F16x8_MUL vmulq_f16
18651865
#define GGML_F16x8_REDUCE(res, x) \
1866-
{ \
1866+
do { \
18671867
int offset = GGML_F16_ARR >> 1; \
18681868
for (int i = 0; i < offset; ++i) { \
18691869
x[i] = vaddq_f16(x[i], x[offset+i]); \
@@ -1879,7 +1879,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
18791879
const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
18801880
const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
18811881
res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
1882-
}
1882+
} while (0)
18831883

18841884
#define GGML_F16_VEC GGML_F16x8
18851885
#define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
@@ -1940,7 +1940,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
19401940
#define GGML_F32x8_ADD _mm256_add_ps
19411941
#define GGML_F32x8_MUL _mm256_mul_ps
19421942
#define GGML_F32x8_REDUCE(res, x) \
1943-
{ \
1943+
do { \
19441944
int offset = GGML_F32_ARR >> 1; \
19451945
for (int i = 0; i < offset; ++i) { \
19461946
x[i] = _mm256_add_ps(x[i], x[offset+i]); \
@@ -1957,7 +1957,7 @@ ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type) {
19571957
_mm256_extractf128_ps(x[0], 1)); \
19581958
const __m128 t1 = _mm_hadd_ps(t0, t0); \
19591959
res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
1960-
}
1960+
} while (0)
19611961
// TODO: is this optimal ?
19621962

19631963
#define GGML_F32_VEC GGML_F32x8
@@ -13562,7 +13562,7 @@ static void ggml_compute_forward_conv_1d(
1356213562
ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
1356313563
} else {
1356413564
GGML_ASSERT(false); // only stride 1 and 2 supported
13565-
};
13565+
}
1356613566
}
1356713567

1356813568
// ggml_compute_forward_conv_2d
@@ -19876,10 +19876,10 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
1987619876
} break;
1987719877
case GGUF_TYPE_ARRAY:
1987819878
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
19879-
};
19879+
}
1988019880
} break;
1988119881
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
19882-
};
19882+
}
1988319883

1988419884
if (!ok) {
1988519885
break;
@@ -20591,10 +20591,10 @@ static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf *
2059120591
} break;
2059220592
case GGUF_TYPE_ARRAY:
2059320593
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
20594-
};
20594+
}
2059520595
} break;
2059620596
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
20597-
};
20597+
}
2059820598
}
2059920599

2060020600
// write tensor infos

llama.cpp

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -448,7 +448,7 @@ struct LLM_TN {
448448
//
449449

450450
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
451-
{ \
451+
do { \
452452
const std::string skey(key); \
453453
const int kid = gguf_find_key(ctx, skey.c_str()); \
454454
if (kid >= 0) { \
@@ -460,7 +460,7 @@ struct LLM_TN {
460460
} else if (req) { \
461461
throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
462462
} \
463-
}
463+
} while (0)
464464

465465
//
466466
// ggml helpers
@@ -1760,7 +1760,7 @@ static void llm_load_hparams(
17601760
}
17611761
} break;
17621762
default: (void)0;
1763-
};
1763+
}
17641764

17651765
model.ftype = ml.ftype;
17661766

@@ -2298,7 +2298,7 @@ static void llm_load_tensors(
22982298
} break;
22992299
default:
23002300
throw std::runtime_error("unknown architecture");
2301-
};
2301+
}
23022302
}
23032303

23042304
ml.done_getting_tensors();
@@ -3693,7 +3693,7 @@ static struct ggml_cgraph * llama_build_graph(
36933693
} break;
36943694
default:
36953695
GGML_ASSERT(false);
3696-
};
3696+
}
36973697

36983698
return result;
36993699
}
@@ -4274,7 +4274,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
42744274
llm_tokenizer_bpe tokenizer(vocab);
42754275
tokenizer.tokenize(raw_text, output);
42764276
} break;
4277-
};
4277+
}
42784278

42794279
return output;
42804280
}
@@ -7094,7 +7094,7 @@ int llama_token_to_piece_with_model(const struct llama_model * model, llama_toke
70947094
buf[2] = '\x85';
70957095
return 3;
70967096
} else if (llama_is_control_token(model->vocab, token)) {
7097-
;
7097+
// do nothing
70987098
} else if (llama_is_byte_token(model->vocab, token)) {
70997099
if (length < 1) {
71007100
return -1;

tests/test-grad0.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ static struct ggml_tensor * get_random_tensor_f32(
107107
break;
108108
default:
109109
assert(false);
110-
};
110+
}
111111

112112
return result;
113113
}
@@ -155,7 +155,7 @@ static struct ggml_tensor * get_random_tensor_f16(
155155
break;
156156
default:
157157
assert(false);
158-
};
158+
}
159159

160160
return result;
161161
}
@@ -203,7 +203,7 @@ static struct ggml_tensor * get_random_tensor_i32(
203203
break;
204204
default:
205205
assert(false);
206-
};
206+
}
207207

208208
return result;
209209
}

tests/test-opt.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -101,7 +101,7 @@ static struct ggml_tensor * get_random_tensor(
101101
break;
102102
default:
103103
assert(false);
104-
};
104+
}
105105

106106
return result;
107107
}
@@ -124,7 +124,7 @@ int main(void) {
124124
struct ggml_context * ctx = ggml_init(params);
125125

126126
int64_t ne1[4] = {4, 128, 1, 1};
127-
int64_t ne2[4] = {4, 256, 1, 1};;
127+
int64_t ne2[4] = {4, 256, 1, 1};
128128
int64_t ne3[4] = {128, 256, 1, 1};
129129

130130
struct ggml_tensor * a = get_random_tensor(ctx, 2, ne1, -1, +1);

0 commit comments

Comments (0)