diff --git a/aio/cpu/vision.yaml b/aio/cpu/vision.yaml
index 3b466d37..4052fa39 100644
--- a/aio/cpu/vision.yaml
+++ b/aio/cpu/vision.yaml
@@ -2,7 +2,7 @@ backend: llama-cpp
 context_size: 4096
 f16: true
 mmap: true
-name: gpt-4-vision-preview
+name: gpt-4o
 
 roles:
   user: "USER:"
diff --git a/aio/gpu-8g/vision.yaml b/aio/gpu-8g/vision.yaml
index db039279..4f5e10b3 100644
--- a/aio/gpu-8g/vision.yaml
+++ b/aio/gpu-8g/vision.yaml
@@ -2,7 +2,7 @@ backend: llama-cpp
 context_size: 4096
 f16: true
 mmap: true
-name: gpt-4-vision-preview
+name: gpt-4o
 
 roles:
   user: "USER:"
diff --git a/aio/intel/vision.yaml b/aio/intel/vision.yaml
index 52843162..37067362 100644
--- a/aio/intel/vision.yaml
+++ b/aio/intel/vision.yaml
@@ -2,7 +2,7 @@ backend: llama-cpp
 context_size: 4096
 mmap: false
 f16: false
-name: gpt-4-vision-preview
+name: gpt-4o
 
 roles:
   user: "USER:"
diff --git a/tests/e2e-aio/e2e_test.go b/tests/e2e-aio/e2e_test.go
index f3f7b106..36d127d2 100644
--- a/tests/e2e-aio/e2e_test.go
+++ b/tests/e2e-aio/e2e_test.go
@@ -171,7 +171,7 @@ var _ = Describe("E2E test", func() {
 	})
 	Context("vision", func() {
 		It("correctly", func() {
-			model := "gpt-4-vision-preview"
+			model := "gpt-4o"
 			resp, err := client.CreateChatCompletion(context.TODO(), openai.ChatCompletionRequest{
 				Model: model,
 				Messages: []openai.ChatCompletionMessage{