diff --git a/examples/embd-input/README.md b/examples/embd-input/README.md
index 02d028f26..5c4c75ea7 100644
--- a/examples/embd-input/README.md
+++ b/examples/embd-input/README.md
@@ -17,7 +17,7 @@ make
 import torch
 
 bin_path = "../LLaVA-13b-delta-v1-1/pytorch_model-00003-of-00003.bin"
-pth_path = "./examples/embd_input/llava_projection.pth"
+pth_path = "./examples/embd-input/llava_projection.pth"
 
 dic = torch.load(bin_path)
 used_key = ["model.mm_projector.weight","model.mm_projector.bias"]
diff --git a/examples/embd-input/llava.py b/examples/embd-input/llava.py
index 2f20cb722..bcbdd2bed 100644
--- a/examples/embd-input/llava.py
+++ b/examples/embd-input/llava.py
@@ -59,7 +59,7 @@ if __name__=="__main__":
     # Also here can use pytorch_model-00003-of-00003.bin directly.
     a.load_projection(os.path.join(
         os.path.dirname(__file__) ,
-        "llava_projetion.pth"))
+        "llava_projection.pth"))
     respose = a.chat_with_image(
         Image.open("./media/llama1-logo.png").convert('RGB'),
         "what is the text in the picture?")
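
For context, a minimal sketch of the extraction step the README hunk belongs to, with the corrected `embd-input` directory name. Only the lines shown in the diff are from the source; the dict filtering and `torch.save` call at the end are assumptions about how the snippet completes.

```python
import torch

bin_path = "../LLaVA-13b-delta-v1-1/pytorch_model-00003-of-00003.bin"
pth_path = "./examples/embd-input/llava_projection.pth"

dic = torch.load(bin_path)
used_key = ["model.mm_projector.weight","model.mm_projector.bias"]

# Assumed completion: keep only the multimodal projector tensors and write them
# to the .pth file that llava.py later passes to load_projection().
torch.save({k: dic[k] for k in used_key}, pth_path)
```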