
Commit 4f8b05a

convert_hf_to_gguf.py : conversion from hf weights to Q6_0 (#483)
* Direct conversion from fp16 to Q6_0
* forgotten comma
* More precise infos
1 parent 7a8abe2 commit 4f8b05a

File tree

3 files changed: +40 −14 lines


convert_hf_to_gguf.py

Lines changed: 13 additions & 13 deletions
```diff
@@ -313,6 +313,7 @@ def prepare_tensors(self):
                         gguf.MODEL_TENSOR.OUTPUT,
                         gguf.MODEL_TENSOR.ATTN_V,
                         gguf.MODEL_TENSOR.ATTN_K,
+                        gguf.MODEL_TENSOR.ATTN_QKV,
                     )
                 ):
                     if self.ftype in (
@@ -323,9 +324,8 @@ def prepare_tensors(self):
                     elif self.ftype in (
                         gguf.LlamaFileType.MOSTLY_Q5_0,
                         gguf.LlamaFileType.MOSTLY_Q5_1,
-                        # gguf.LlamaFileType.MOSTLY_Q6_0,
                     ):
-                        data_qtype = gguf.GGMLQuantizationType.Q8_0
+                        data_qtype = gguf.GGMLQuantizationType.Q6_0
 
                 # No override (data_qtype is False), or wants to be quantized (data_qtype is True)
                 if isinstance(data_qtype, bool):
@@ -343,8 +343,8 @@ def prepare_tensors(self):
                         data_qtype = gguf.GGMLQuantizationType.Q5_0
                     elif self.ftype == gguf.LlamaFileType.MOSTLY_Q5_1:
                         data_qtype = gguf.GGMLQuantizationType.Q5_1
-                    # elif self.ftype == gguf.LlamaFileType.MOSTLY_Q6_0: // To be implemented?
-                    #     data_qtype = gguf.GGMLQuantizationType.Q6_0
+                    elif self.ftype == gguf.LlamaFileType.MOSTLY_Q6_0:
+                        data_qtype = gguf.GGMLQuantizationType.Q6_0
                     elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0:
                         data_qtype = gguf.GGMLQuantizationType.Q8_0
                     else:
@@ -419,12 +419,12 @@ def prepare_metadata(self, vocab_only: bool):
         logger.info("Set model quantization version")
         self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)
 
-        logger.info("****************************************************************************************")
-        logger.info("** quantizing to `Q4_0`,`Q4_1`,`Q5_0`, or `Q5_1`is not equiv to using `llama-quantize`")
-        logger.info("** `Q4_0`,`Q4_1` are here using embeddings, output, attn_k and attn_v in q5_0")
-        logger.info("** `Q5_0`,`Q5_1` are here using embeddings, output, attn_k and attn_v in q8_0")
-        logger.info("** This, in order to generate a small but reliable conversion to create an iMatrix file.")
-        logger.info("****************************************************************************************")
+        logger.info("***********************************************************************************************")
+        logger.info("** Converting to `q4_0`,`q4_1`,`q5_0`, `q5_1` or `q6_0` is not equiv to using `llama-quantize`!")
+        logger.info("** Ftype `q4_0`,`q4_1` are here converting embeddings, output, attn_k and attn_v/qkv in q5_0.")
+        logger.info("** Ftype `q5_0`,`q5_1` are here converting embeddings, output, attn_k and attn_v/qkv in q6_0.")
+        logger.info("** This, in order to create a small but viable conv. to then for example make an iMatrix file.")
+        logger.info("***********************************************************************************************")
 
     def write(self):
         self.prepare_tensors()
@@ -4113,8 +4113,8 @@ def parse_args() -> argparse.Namespace:
         help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
     )
     parser.add_argument(
-        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "q4_0", "q4_1", "q5_0", "q5_1", "auto"], default="f16",
-        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, q4_0, q4_1, q5_0, q5_1 for a smaller conversion to then create an iMatrix file for example, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
+        "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "q4_0", "q4_1", "q5_0", "q5_1", "q6_0", "auto"], default="f16",
+        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, q4_0, q4_1, q5_0, q5_1, q6_0 for a smaller conversion to then create an iMatrix file for example, and auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
     )
     parser.add_argument(
         "--bigendian", action="store_true",
@@ -4204,7 +4204,7 @@ def main() -> None:
         "q4_1": gguf.LlamaFileType.MOSTLY_Q4_1,
         "q5_0": gguf.LlamaFileType.MOSTLY_Q5_0,
         "q5_1": gguf.LlamaFileType.MOSTLY_Q5_1,
-        # "q6_0": gguf.LlamaFileType.MOSTLY_Q6_0,
+        "q6_0": gguf.LlamaFileType.MOSTLY_Q6_0,
         "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0,
         "auto": gguf.LlamaFileType.GUESSED,
     }
```
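
With the `--outtype` choice and the ftype map extended above, a model can be converted straight from the HF checkpoint to Q6_0. A minimal invocation sketch (the model path and output filename below are placeholders, not from this commit):

```python
# Sketch: calling the converter with the new q6_0 outtype.
# "path/to/hf-model" and "model-q6_0.gguf" are placeholder names.
import subprocess

subprocess.run(
    [
        "python", "convert_hf_to_gguf.py", "path/to/hf-model",
        "--outtype", "q6_0",            # maps to gguf.LlamaFileType.MOSTLY_Q6_0 (see the diff above)
        "--outfile", "model-q6_0.gguf",
    ],
    check=True,
)
```

Per the logger notes above, embeddings, output, attn_k and attn_v/qkv are kept at higher precision than the rest of the tensors, so the result is meant as a small but viable base for computing an iMatrix, not as a substitute for `llama-quantize`.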

gguf-py/gguf/quants.py

Lines changed: 26 additions & 0 deletions
```diff
@@ -377,6 +377,32 @@ def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
         return (d * qs) + m
 
 
+class Q6_0(__Quant, qtype=GGMLQuantizationType.Q6_0):
+    @classmethod
+    def quantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
+        n_blocks = blocks.shape[0]
+
+        imax = abs(blocks).argmax(axis=-1, keepdims=True)
+        max = np.take_along_axis(blocks, imax, axis=-1)
+
+        d = max / -32
+        with np.errstate(divide="ignore"):
+            id = np.where(d == 0, 0, 1 / d)
+        # Adapted from Q5_0
+        q = np.trunc((np.float64(blocks) * np.float64(id)) + np.float64(32.5), dtype=np.float32).astype(np.uint8).clip(0, 63)
+
+        qs = q.reshape((n_blocks, 2, cls.block_size // 2))
+        qs = (qs[..., 0, :] & np.uint8(0x0F)) | (qs[..., 1, :] << np.uint8(4))
+
+        qh = np.zeros((n_blocks, cls.block_size // 4), dtype=np.uint8)
+        for j in range(cls.block_size // 2):
+            h = ((q[:, j] >> 4) | ((q[:, j + cls.block_size // 2] >> 4) << 2)).astype(np.uint8)
+            qh[:, j % (cls.block_size // 4)] |= (h << 4 * (j // (cls.block_size // 4)))
+
+        d = d.astype(np.float16).view(np.uint8)
+
+        return np.concatenate([d, qh, qs], axis=-1)
+
 class Q8_0(__Quant, qtype=GGMLQuantizationType.Q8_0):
     @classmethod
     # Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c
```
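
The commit adds only the quantize direction; a `dequantize_blocks` for Q6_0 is not part of this diff. As a reading aid for the bit layout, here is a hedged sketch of the inverse unpacking, written directly against the packing above (2-byte fp16 scale, `block_size // 4` bytes of high bits, `block_size // 2` bytes of packed low nibbles; a block size of 32 is assumed, as for the other `*_0` types):

```python
import numpy as np

def dequantize_q6_0_blocks(blocks: np.ndarray, block_size: int = 32) -> np.ndarray:
    """Illustrative inverse of the Q6_0 packing above; not part of this commit."""
    n_blocks = blocks.shape[0]

    # Layout per block: 2 bytes fp16 scale | block_size // 4 bytes qh | block_size // 2 bytes qs
    d, qh, qs = np.hsplit(blocks, [2, 2 + block_size // 4])
    d = d.view(np.float16).astype(np.float32)            # (n_blocks, 1)

    # Low 4 bits: first half of the block in the low nibble, second half in the high nibble
    q_lo_first  = qs & np.uint8(0x0F)
    q_lo_second = qs >> np.uint8(4)

    # High 2 bits: qh[j % (block_size // 4)] holds h(j) in its low nibble for the first
    # block_size // 4 positions and in its high nibble for the rest; each h packs two
    # 2-bit values (bits 0-1 for q[j], bits 2-3 for q[j + block_size // 2])
    h = np.concatenate([qh & np.uint8(0x0F), qh >> np.uint8(4)], axis=-1)
    q_hi_first  = (h & np.uint8(0x03)) << np.uint8(4)
    q_hi_second = ((h >> np.uint8(2)) & np.uint8(0x03)) << np.uint8(4)

    q = np.concatenate([q_lo_first | q_hi_first, q_lo_second | q_hi_second], axis=-1)
    return d * (q.astype(np.int16) - 32).astype(np.float32)   # (n_blocks, block_size)
```

Each 6-bit value is reassembled from its low nibble in `qs` and its two high bits in `qh`, then mapped back as `d * (q - 32)`, the inverse of `q = trunc(x / d + 32.5)` with `d = max / -32` used in `quantize_blocks`.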

gguf-py/tests/test_quants.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -64,7 +64,7 @@ def __init__(self, libggml: Path):
         self.libggml.ggml_quantize_requires_imatrix.argtypes = (ctypes.c_int,)
 
         for t in (
-            "q4_0", "q4_1", "q5_0", "q5_1", "q8_0",
+            "q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "q6_0",
             "q2_K", "q3_K", "q4_K", "q5_K", "q6_K",
             "iq2_xxs", "iq2_xs", "iq2_s", "iq3_xxs", "iq3_s", "iq1_s", "iq1_m",
             "iq4_nl", "iq4_xs",
```
