mirror of https://github.com/ggerganov/whisper.cpp.git
synced 2024-12-18 20:27:53 +00:00
ci : add an option to fail on compile warning (llama/3952)
* feat(ci): add an option to fail on compile warning
* Update CMakeLists.txt
* minor : fix compile warnings
  ggml-ci
* ggml : fix unreachable code warnings
  ggml-ci
* ci : disable fatal warnings for windows, ios and tvos
* ggml : fix strncpy warning
* ci : disable fatal warnings for MPI build
* ci : add fatal warnings to ggml-ci
  ggml-ci
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
parent 74a6acc999
commit 1b25d2fa0a
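Note: an option that makes a build "fail on compile warning" generally just promotes warnings to errors, e.g. by appending -Werror (GCC/Clang) or /WX (MSVC) to the compile flags; the CMakeLists.txt wiring itself is not part of the hunks shown below. A minimal sketch of the effect, with illustrative file and flag choices:

    /* warn_example.c -- compiles, but -Wall reports an unused variable.
     *
     *   cc -Wall -c warn_example.c           -> warning only, build continues
     *   cc -Wall -Werror -c warn_example.c   -> the warning becomes a hard error,
     *                                           which is what lets CI fail fast
     */
    int main(void) {
        int unused = 42;   /* -Wunused-variable under -Wall */
        return 0;
    }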
@@ -1006,6 +1006,7 @@ static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, gg
         }
     }
     GGML_ASSERT(false && "tensor buffer type not supported by any backend");
+    return -1; // silence warning
 }

 #if 0
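Note: in the hunk above the function must return an int on every path the compiler can see. GGML_ASSERT(false && ...) aborts at runtime, but unless the macro is marked noreturn the compiler assumes execution can continue, and a warnings-as-errors build then trips over -Wreturn-type (GCC/Clang) or C4715 (MSVC). A small sketch of the same pattern using the standard assert; the names are illustrative, not ggml's:

    #include <assert.h>

    /* A lookup that "cannot" fail: callers guarantee a match exists. */
    int find_index(const int * ids, int n, int wanted) {
        for (int i = 0; i < n; i++) {
            if (ids[i] == wanted) {
                return i;
            }
        }
        assert(0 && "id not found");
        return -1;   /* unreachable in practice; with NDEBUG the assert is a no-op,
                        so this return also keeps -Wreturn-type / C4715 quiet */
    }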
@@ -176,7 +176,7 @@ struct ggml_metal_context {
 // MSL code
 // TODO: move the contents here when ready
 // for now it is easier to work in a separate file
-//static NSString * const msl_library_source = @"see metal.metal";
+// static NSString * const msl_library_source = @"see metal.metal";

 // Here to assist with NSBundle Path Hack
 @interface GGMLMetalClass : NSObject
ggml.c (15 lines changed)
@@ -868,7 +868,7 @@ do { \
     const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
                                  _mm256_extractf128_ps(x[0], 1)); \
     const __m128 t1 = _mm_hadd_ps(t0, t0); \
-    res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
+    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
 } while (0)
 // TODO: is this optimal ?

@@ -1149,7 +1149,7 @@ inline static void __wasm_f16x4_store(ggml_fp16_t * p, v128_t x) {
         x[i] = _mm_add_ps(x[i], x[offset+i]); \
     } \
     const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
-    res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
+    res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
 }
 // TODO: is this optimal ?

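Note: this hunk and the previous one add an explicit cast where a float intrinsic result is stored into res, whose type is the wider ggml_float (a double in ggml.c). The implicit widening is harmless, but flags such as clang's -Wdouble-promotion report it, so the cast makes the intent explicit and keeps a fatal-warnings build quiet. A standalone sketch, with illustrative names:

    typedef double ggml_float_t;   /* stand-in for ggml_float */

    ggml_float_t reduce_example(float partial) {
        /* ggml_float_t r = partial;        implicit float -> double widening;
                                            -Wdouble-promotion (clang) flags this */
        return (ggml_float_t) partial;   /* explicit cast: same value, no diagnostic */
    }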
@@ -2086,6 +2086,7 @@ void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
         }
     }
 #else
+    GGML_UNUSED(numa_flag);
     // TODO
 #endif
 }
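Note: numa_flag is only consumed on the platform-specific side of the conditional; in the #else branch the parameter would otherwise be reported by -Wunused-parameter. GGML_UNUSED is, in the usual ggml definition, just a cast to void. A self-contained sketch of the idiom (the EXAMPLE_HAVE_NUMA macro and the function names are illustrative):

    #include <stdio.h>

    #define UNUSED(x) (void)(x)   /* the usual expansion of GGML_UNUSED-style macros */

    void numa_init_example(int numa_flag) {
    #ifdef EXAMPLE_HAVE_NUMA
        printf("numa strategy: %d\n", numa_flag);   /* parameter used in this branch */
    #else
        UNUSED(numa_flag);   /* without this, -Wunused-parameter fires in the other build */
    #endif
    }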
@@ -3219,7 +3220,7 @@ const char * ggml_get_name(const struct ggml_tensor * tensor) {
 }

 struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
-    strncpy(tensor->name, name, sizeof(tensor->name));
+    strncpy(tensor->name, name, sizeof(tensor->name) - 1);
     tensor->name[sizeof(tensor->name) - 1] = '\0';
     return tensor;
 }
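Note: GCC 8 and later diagnose strncpy(dst, src, sizeof(dst)) with -Wstringop-truncation, because a source of that length or longer leaves dst without a terminating NUL. Bounding the copy at sizeof(dst) - 1 and setting the last byte to '\0' explicitly is the standard fix, and is the shape the code above ends up with. A self-contained sketch with an illustrative struct in place of ggml_tensor:

    #include <stdio.h>
    #include <string.h>

    struct named {              /* illustrative stand-in for ggml_tensor */
        char name[64];
    };

    static void set_name_example(struct named * t, const char * name) {
        // strncpy(t->name, name, sizeof(t->name));   -Wstringop-truncation on GCC 8+
        strncpy(t->name, name, sizeof(t->name) - 1);  /* copy at most 63 bytes ...     */
        t->name[sizeof(t->name) - 1] = '\0';          /* ... and guarantee termination */
    }

    int main(void) {
        struct named t = { {0} };
        set_name_example(&t, "a fairly long tensor name");
        printf("%s\n", t.name);
        return 0;
    }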
@@ -18575,7 +18576,9 @@ static enum ggml_opt_result linesearch_backtracking(
         (*step) *= width;
     }

-    GGML_UNREACHABLE();
+    GGML_ASSERT(false && "line search failed");
+
+    return GGML_LINESEARCH_FAIL;
 }

 static enum ggml_opt_result ggml_opt_lbfgs(
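Note: GGML_UNREACHABLE presumably expands differently per build (an assert in debug builds, __builtin_unreachable() on GCC/Clang, possibly a plain no-op elsewhere; this is an assumption about the macro in ggml.h, which is not shown here). In the expansions that do not terminate control flow, a non-void function falling off the end draws missing-return or unreachable-code diagnostics, which a fatal-warnings build turns into errors. Replacing the marker with an assert plus an explicit error return, as here and in the lbfgs hunk below, is safe under every expansion. A sketch of the problematic no-op case, with illustrative names:

    /* Typical shape of an unreachable-marker macro (illustrative, not ggml.h verbatim). */
    #if defined(__GNUC__)
    #    define MY_UNREACHABLE() __builtin_unreachable()
    #else
    #    define MY_UNREACHABLE() ((void) 0)   /* no-op fallback */
    #endif

    enum result { RESULT_OK, RESULT_FAIL };

    enum result search_example(const int * v, int n) {
        for (int i = 0; i < n; i++) {
            if (v[i] == 0) {
                return RESULT_OK;
            }
        }
        /* With the no-op expansion, MY_UNREACHABLE() here would leave a path that
         * returns no value (-Wreturn-type / C4715); returning an explicit error
         * code instead, as the ggml hunks do, satisfies every compiler. */
        return RESULT_FAIL;
    }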
@@ -18843,7 +18846,9 @@ static enum ggml_opt_result ggml_opt_lbfgs(
         step[0] = 1.0;
     }

-    GGML_UNREACHABLE();
+    GGML_ASSERT(false && "lbfgs failed");
+
+    return GGML_OPT_DID_NOT_CONVERGE;
 }

 struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {