minor : improve C++ and Python style (#768)

* use some STL functions

* use self.field rather than setattr, use pathlib.Path (a short sketch of both patterns follows this list)

* recover some format

* const some iter

* Keep the original

* 2 space
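
The two Python-side patterns named above are direct attribute assignment in place of setattr and pathlib.Path in place of string path concatenation. A minimal sketch of both, assuming a throwaway Config class (the class and its field are illustrative, not code from this repository):

    from pathlib import Path

    class Config:
        def __init__(self, n_mels):
            # assign the field directly instead of setattr(self, "n_mels", n_mels)
            self.n_mels = n_mels

    # pathlib composes paths with the / operator instead of string concatenation
    dir_whisper = Path("path-to-whisper-repo")
    mel_filters = dir_whisper / "whisper" / "assets" / "mel_filters.npz"

    print(Config(80).n_mels, mel_filters)

Path objects also replace open(fname, "wb") with fname.open("wb"), which is the change applied to fout in the diff below.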
AsukaMinato authored 2023-04-29 16:06:25 +09:00, committed by GitHub
parent 4d89ee2e59
commit 94aa56f19e
4 changed files with 100 additions and 105 deletions

@@ -40,7 +40,7 @@ import code
 import torch
 import numpy as np
 import base64
+from pathlib import Path
 #from transformers import GPTJForCausalLM
 #from transformers import GPT2TokenizerFast
@@ -194,17 +194,17 @@ if len(sys.argv) < 4:
     print("Usage: convert-pt-to-ggml.py model.pt path-to-whisper-repo dir-output [use-f32]\n")
     sys.exit(1)
-fname_inp = sys.argv[1]
-dir_whisper = sys.argv[2]
-dir_out = sys.argv[3]
+fname_inp = Path(sys.argv[1])
+dir_whisper = Path(sys.argv[2])
+dir_out = Path(sys.argv[3])
 # try to load PyTorch binary data
 try:
     model_bytes = open(fname_inp, "rb").read()
     with io.BytesIO(model_bytes) as fp:
         checkpoint = torch.load(fp, map_location="cpu")
-except:
-    print("Error: failed to load PyTorch model file: %s" % fname_inp)
+except Exception:
+    print("Error: failed to load PyTorch model file:" , fname_inp)
     sys.exit(1)
 hparams = checkpoint["dims"]
@@ -218,17 +218,17 @@ list_vars = checkpoint["model_state_dict"]
 # load mel filters
 n_mels = hparams["n_mels"]
-with np.load(os.path.join(dir_whisper, "whisper/assets", "mel_filters.npz")) as f:
+with np.load(dir_whisper / "whisper" / "assets" / "mel_filters.npz") as f:
     filters = torch.from_numpy(f[f"mel_{n_mels}"])
 #print (filters)
 #code.interact(local=locals())
 multilingual = hparams["n_vocab"] == 51865
-tokenizer = os.path.join(dir_whisper, "whisper/assets", multilingual and "multilingual.tiktoken" or "gpt2.tiktoken")
+tokenizer = dir_whisper / "whisper" / "assets" / (multilingual and "multilingual.tiktoken" or "gpt2.tiktoken")
 # output in the same directory as the model
-fname_out = dir_out + "/ggml-model.bin"
+fname_out = dir_out / "ggml-model.bin"
 with open(tokenizer, "rb") as f:
     contents = f.read()
@@ -238,9 +238,9 @@ with open(tokenizer, "rb") as f:
 use_f16 = True
 if len(sys.argv) > 4:
     use_f16 = False
-    fname_out = dir_out + "/ggml-model-f32.bin"
+    fname_out = dir_out / "ggml-model-f32.bin"
-fout = open(fname_out, "wb")
+fout = fname_out.open("wb")
 fout.write(struct.pack("i", 0x67676d6c)) # magic: ggml in hex
 fout.write(struct.pack("i", hparams["n_vocab"]))
@@ -273,20 +273,19 @@ for key in tokens:
 for name in list_vars.keys():
     data = list_vars[name].squeeze().numpy()
-    print("Processing variable: " + name + " with shape: ", data.shape)
+    print("Processing variable: " , name , " with shape: ", data.shape)
     # reshape conv bias from [n] to [n, 1]
-    if name == "encoder.conv1.bias" or \
-       name == "encoder.conv2.bias":
+    if name in ["encoder.conv1.bias", "encoder.conv2.bias"]:
         data = data.reshape(data.shape[0], 1)
-        print(" Reshaped variable: " + name + " to shape: ", data.shape)
+        print(f" Reshaped variable: {name} to shape: ", data.shape)
-    n_dims = len(data.shape);
+    n_dims = len(data.shape)
     # looks like the whisper models are in f16 by default
     # so we need to convert the small tensors to f32 until we fully support f16 in ggml
     # ftype == 0 -> float32, ftype == 1 -> float16
-    ftype = 1;
+    ftype = 1
     if use_f16:
         if n_dims < 2 or \
            name == "encoder.conv1.bias" or \
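
The comment block in this hunk documents the converter's tensor-type convention: ftype 0 is float32, ftype 1 is float16, and small tensors stay in float32 even when f16 output is requested. A minimal sketch of that decision, assuming numpy arrays as in the script (the helper name choose_ftype is illustrative, and the script's real exception list continues past the names visible in this hunk):

    import numpy as np

    def choose_ftype(name, data, use_f16):
        # ftype == 0 -> float32, ftype == 1 -> float16
        if not use_f16:
            return 0
        # 1-d tensors and the conv biases stay in float32
        if data.ndim < 2 or name in ["encoder.conv1.bias", "encoder.conv2.bias"]:
            return 0
        return 1

    print(choose_ftype("encoder.conv1.bias", np.zeros(3), True))              # 0
    print(choose_ftype("encoder.blocks.0.mlp.0.weight", np.zeros((4, 4)), True))  # 1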
@@ -307,16 +306,16 @@ for name in list_vars.keys():
     # data = data.transpose()
     # header
-    str = name.encode('utf-8')
-    fout.write(struct.pack("iii", n_dims, len(str), ftype))
+    str_ = name.encode('utf-8')
+    fout.write(struct.pack("iii", n_dims, len(str_), ftype))
     for i in range(n_dims):
         fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
-    fout.write(str);
+    fout.write(str_)
     # data
     data.tofile(fout)
 fout.close()
-print("Done. Output file: " + fname_out)
+print("Done. Output file: " , fname_out)
 print("")