diff --git a/llm-apps-course/notebooks/01. Using_APIs.ipynb b/llm-apps-course/notebooks/01. Using_APIs.ipynb
index f564d452..1da8aea7 100644
--- a/llm-apps-course/notebooks/01. Using_APIs.ipynb
+++ b/llm-apps-course/notebooks/01. Using_APIs.ipynb
@@ -103,7 +103,7 @@
     "metadata": {},
     "outputs": [],
     "source": [
-    "encoding = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
+    "encoding = tiktoken.encoding_for_model(\"gpt-3.5-turbo-instruct\")\n",
     "enc = encoding.encode(\"Weights & Biases is awesome!\")\n",
     "print(enc)\n",
     "print(encoding.decode(enc))"
@@ -157,7 +157,7 @@
     "def generate_with_temperature(temp):\n",
     "    \"Generate text with a given temperature, higher temperature means more randomness\"\n",
     "    response = openai.Completion.create(\n",
-    "        model=\"text-davinci-003\",\n",
+    "        model=\"gpt-3.5-turbo-instruct\",\n",
     "        prompt=\"Say something about Weights & Biases\",\n",
     "        max_tokens=50,\n",
     "        temperature=temp,\n",
@@ -191,7 +191,7 @@
     "def generate_with_topp(topp):\n",
     "    "def generate_with_topp(topp):\n" is the top-p variant of the same helper,
     "    \"Generate text with a given top-p, higher top-p means more randomness\"\n",
     "    response = openai.Completion.create(\n",
-    "        model=\"text-davinci-003\",\n",
+    "        model=\"gpt-3.5-turbo-instruct\",\n",
     "        prompt=\"Say something about Weights & Biases\",\n",
     "        max_tokens=50,\n",
     "        top_p=topp,\n",
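
For reference, a minimal sketch of the two call sites after the model swap, outside the notebook JSON. It assumes the pre-1.0 openai Python client the notebook already uses, a tiktoken release recent enough to recognise "gpt-3.5-turbo-instruct", and an OPENAI_API_KEY in the environment; the temperature value is illustrative, not taken from the diff.

import os

import openai
import tiktoken

openai.api_key = os.environ["OPENAI_API_KEY"]

# gpt-3.5-turbo-instruct resolves to the cl100k_base tokenizer rather than
# text-davinci-003's p50k_base, so token counts may shift after the swap.
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo-instruct")
enc = encoding.encode("Weights & Biases is awesome!")
print(encoding.name, enc)
print(encoding.decode(enc))

# Same Completions endpoint as before; only the model name changes.
response = openai.Completion.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say something about Weights & Biases",
    max_tokens=50,
    temperature=0.7,  # illustrative value; the notebook sweeps this parameter
)
print(response["choices"][0]["text"])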