Fix conversion of unnormalized BF16->BF16 weights (llama/7843)

* add truncate_bf16

* truncate intermediate fp32 if converting bf16 to bf16

* fix masking in __compute_fp32_to_bf16

* np.int16 no longer used

* missing cast and additional numpy 2.x fix

* ggml-impl : do not flush bf16 subnormals to zero

* ggml : add reference fp32 to bf16 conversion

The fast version is no longer equivalent for all platforms
because of the handling of subnormal values.

* gguf-py : remove flush to zero for bf16 subnormals

* gguf-py : remove float32 truncation to bf16

Rounding achieves the same thing in the cases where this was used.

* missed prototype update in merge

* merge cleanup

---------

Co-authored-by: Francis Couture-Harpin <git@compilade.net>
Sigbjørn Skjæret authored on 2024-08-02 21:11:39 +02:00; committed by Georgi Gerganov
parent 9cf14ebcbc
commit 6cb38c3673
3 changed files with 13 additions and 8 deletions
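The key claim in the message above is that round-to-nearest-even makes an explicit bf16 truncation step unnecessary: widening bf16 to fp32 is exact, and rounding that fp32 back recovers the original bf16 bits, subnormals included. A minimal standalone sketch of that round-trip property, using local bit-twiddling helpers that mirror the reference conversion added here (they are not the ggml API):

// Sketch only: local helpers mirroring the reference conversion.
#include <assert.h>
#include <stdint.h>
#include <string.h>

static float bf16_to_fp32(uint16_t h) {
    uint32_t i = (uint32_t)h << 16; // widening bf16 -> fp32 is exact
    float f;
    memcpy(&f, &i, sizeof(f));
    return f;
}

static uint16_t fp32_to_bf16_rne(float s) {
    uint32_t i;
    memcpy(&i, &s, sizeof(i));
    if ((i & 0x7fffffff) > 0x7f800000) {   // NaN
        return (uint16_t)((i >> 16) | 64); // force to quiet
    }
    // round to nearest, ties to even; subnormals pass through unflushed
    return (uint16_t)((i + (0x7fff + ((i >> 16) & 1))) >> 16);
}

int main(void) {
    // every non-NaN bf16 pattern survives the fp32 round trip unchanged
    for (uint32_t h = 0; h < 0x10000; h++) {
        float f = bf16_to_fp32((uint16_t)h);
        if (f == f) { // skip NaNs: forcing the quiet bit may change them
            assert(fp32_to_bf16_rne(f) == (uint16_t)h);
        }
    }
    return 0;
}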

ggml.h:

@@ -349,6 +349,7 @@ extern "C" {
     GGML_API ggml_bf16_t ggml_fp32_to_bf16(float);
     GGML_API float ggml_bf16_to_fp32(ggml_bf16_t); // consider just doing << 16
     GGML_API void ggml_bf16_to_fp32_row(const ggml_bf16_t *, float *, int64_t);
+    GGML_API void ggml_fp32_to_bf16_row_ref(const float *, ggml_bf16_t *, int64_t);
     GGML_API void ggml_fp32_to_bf16_row(const float *, ggml_bf16_t *, int64_t);
 
     struct ggml_object;
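A hypothetical caller of the new declaration, assuming a program compiled and linked against ggml; the inputs include an fp32 subnormal to show that the reference path preserves it:

// Hypothetical usage; requires building against ggml so that ggml.h
// and the new symbol are available.
#include <stdio.h>
#include "ggml.h"

int main(void) {
    const float src[4] = { 1.0f, -2.5f, 1e-40f /* fp32 subnormal */, 0.0f };
    ggml_bf16_t dst[4];

    ggml_fp32_to_bf16_row_ref(src, dst, 4); // reference: rounds, keeps subnormals

    for (int i = 0; i < 4; i++) {
        printf("%g -> %g\n", src[i], ggml_bf16_to_fp32(dst[i]));
    }
    return 0;
}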

ggml-impl.h:

@@ -80,8 +80,9 @@ static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
 /**
  * Converts float32 to brain16.
  *
- * This function is binary identical to AMD Zen4 VCVTNEPS2BF16.
- * Subnormals shall be flushed to zero, and NANs will be quiet.
+ * This is binary identical with Google Brain float conversion.
+ * Floats shall round to nearest even, and NANs shall be quiet.
+ * Subnormals aren't flushed to zero, except perhaps when used.
  * This code should vectorize nicely if using modern compilers.
  */
 static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
@@ -95,10 +96,6 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
         h.bits = (u.i >> 16) | 64; /* force to quiet */
         return h;
     }
-    if (!(u.i & 0x7f800000)) { /* subnormal */
-        h.bits = (u.i & 0x80000000) >> 16; /* flush to zero */
-        return h;
-    }
     h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
     return h;
 }
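The surviving line, h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16, implements round-to-nearest-even with a data-dependent bias. A worked pass through the arithmetic, on bit patterns chosen purely for illustration:

// Illustration only: walking the bias arithmetic through an exact tie.
// Take u.i = 0x3F808000 (about 1.00390625f): the low 16 bits are exactly
// halfway (0x8000), so nearest-even must pick the even 16-bit result.
//
//   (u.i >> 16) & 1 = 0            lowest kept bit is already even
//   bias            = 0x7fff + 0
//   u.i + bias      = 0x3F80FFFF
//   >> 16           = 0x3F80       the tie rounds down, to the even value
//
// With u.i = 0x3F818000 the kept bit is 1, the bias becomes 0x8000, the
// sum carries into bit 16, and the result rounds up to 0x3F82, again the
// even neighbour. A subnormal input has exponent bits of zero but takes
// exactly the same path, so it is rounded rather than flushed to zero.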

ggml.c:

@@ -483,9 +483,16 @@ void ggml_bf16_to_fp32_row(const ggml_bf16_t * x, float * y, int64_t n) {
     }
 }
 
+void ggml_fp32_to_bf16_row_ref(const float * x, ggml_bf16_t * y, int64_t n) {
+    for (int i = 0; i < n; i++) {
+        y[i] = ggml_compute_fp32_to_bf16(x[i]);
+    }
+}
+
 void ggml_fp32_to_bf16_row(const float * x, ggml_bf16_t * y, int64_t n) {
     int i = 0;
 #if defined(__AVX512BF16__)
+    // subnormals are flushed to zero on this platform
     for (; i + 32 <= n; i += 32) {
         _mm512_storeu_si512(
             (__m512i *)(y + i),
@@ -965,7 +972,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .is_quantized = false,
         .to_float = (ggml_to_float_t) ggml_bf16_to_fp32_row,
         .from_float = (ggml_from_float_t) ggml_fp32_to_bf16_row,
-        .from_float_ref = (ggml_from_float_t) ggml_fp32_to_bf16_row,
+        .from_float_ref = (ggml_from_float_t) ggml_fp32_to_bf16_row_ref,
         .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
         .vec_dot_type = GGML_TYPE_BF16,
         .nrows = 1,
@@ -20653,7 +20660,7 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_BF16:
             {
                 size_t elemsize = sizeof(ggml_bf16_t);
-                ggml_fp32_to_bf16_row(src + start, (ggml_bf16_t *)dst + start, n);
+                ggml_fp32_to_bf16_row_ref(src + start, (ggml_bf16_t *)dst + start, n);
                 result = n * elemsize;
             } break;
         case GGML_TYPE_F32:
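The last hunk means model conversion through ggml_quantize_chunk now always takes the reference path for bf16, regardless of platform. A hedged usage sketch; the signature shown is the one ggml used around this commit, so verify it against ggml.h in your tree:

// Hedged sketch: converting fp32 rows to bf16 through ggml_quantize_chunk,
// which after this change dispatches to the reference converter for
// GGML_TYPE_BF16.
#include <stddef.h>
#include "ggml.h"

size_t rows_to_bf16(const float * src, ggml_bf16_t * dst,
                    int64_t nrows, int64_t n_per_row) {
    // start = 0 converts from the first row; bf16 uses no importance matrix.
    // Returns the number of bytes written:
    // nrows * n_per_row * sizeof(ggml_bf16_t).
    return ggml_quantize_chunk(GGML_TYPE_BF16, src, dst,
                               /*start =*/ 0, nrows, n_per_row,
                               /*imatrix =*/ NULL);
}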