diff --git a/examples/gemini/javascript/langchain_quickstart_node/main.js b/examples/gemini/javascript/langchain_quickstart_node/main.js
index 14d8ed958..c8f43b84a 100644
--- a/examples/gemini/javascript/langchain_quickstart_node/main.js
+++ b/examples/gemini/javascript/langchain_quickstart_node/main.js
@@ -39,10 +39,10 @@ async function invokeGeminiPro() {
}
/**
- * Creates a Gemini Pro Vision multimodal chat model, invokes the model with an
+ * Creates a Gemini Flash multimodal chat model, invokes the model with an
* input containing text and image data, and logs the result.
*/
-async function invokeGeminiProVision() {
+async function invokeGeminiFlash() {
const model = new ChatGoogleGenerativeAI({
modelName: 'gemini-1.5-flash',
maxOutputTokens: 1024,
@@ -87,7 +87,7 @@ async function embedText() {
*/
async function run() {
invokeGeminiPro();
- invokeGeminiProVision();
+ invokeGeminiFlash();
embedText();
}
diff --git a/site/en/gemini-api/docs/get-started/python.ipynb b/site/en/gemini-api/docs/get-started/python.ipynb
index bb96b7435..a9634df68 100644
--- a/site/en/gemini-api/docs/get-started/python.ipynb
+++ b/site/en/gemini-api/docs/get-started/python.ipynb
@@ -18,7 +18,7 @@
},
"outputs": [],
"source": [
- "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# @title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
@@ -157,8 +157,8 @@
"\n",
"\n",
"def to_markdown(text):\n",
- " text = text.replace('•', ' *')\n",
- " return Markdown(textwrap.indent(text, '> ', predicate=lambda _: True))"
+ " text = text.replace(\"•\", \" *\")\n",
+ " return Markdown(textwrap.indent(text, \"> \", predicate=lambda _: True))"
]
},
{
@@ -183,7 +183,9 @@
"\n",
"Before you can use the Gemini API, you must first obtain an API key. If you don't already have one, create a key with one click in Google AI Studio.\n",
"\n",
- "Get an API key"
+ "Get an API key\n",
+ "\n",
+    "Note that depending on where you are located, you might have to [enable billing](https://ai.google.dev/gemini-api/docs/billing#enable-cloud-billing) since the free tier is not available in [EEA (including EU), the UK, and CH](https://ai.google.dev/gemini-api/docs/billing#is-Gemini-free-in-EEA-UK-CH)."
]
},
{
@@ -192,7 +194,7 @@
"id": "tHhsUxDTdw0W"
},
"source": [
- "In Colab, add the key to the secrets manager under the \"🔑\" in the left panel. Give it the name `GOOGLE_API_KEY`."
+ "In Colab, add the key to the secrets manager under the \"🔑\" in the left panel. Give it the name `GEMINI_API_KEY`."
]
},
{
@@ -203,7 +205,7 @@
"source": [
"Once you have the API key, pass it to the SDK. You can do this in two ways:\n",
"\n",
- "* Put the key in the `GOOGLE_API_KEY` environment variable (the SDK will automatically pick it up from there).\n",
+ "* Put the key in the `GEMINI_API_KEY` environment variable (the SDK will automatically pick it up from there).\n",
"* Pass the key to `genai.configure(api_key=...)`"
]
},
@@ -215,10 +217,10 @@
},
"outputs": [],
"source": [
- "# Or use `os.getenv('GOOGLE_API_KEY')` to fetch an environment variable.\n",
- "GOOGLE_API_KEY=userdata.get('GOOGLE_API_KEY')\n",
+ "# Or use `os.getenv('GEMINI_API_KEY')` to fetch an environment variable.\n",
+    "GEMINI_API_KEY = userdata.get(\"GEMINI_API_KEY\")\n",
"\n",
- "genai.configure(api_key=GOOGLE_API_KEY)"
+ "genai.configure(api_key=GEMINI_API_KEY)"
]
},
{
@@ -231,8 +233,8 @@
"\n",
"Now you're ready to call the Gemini API. Use `list_models` to see the available Gemini models:\n",
"\n",
- "* `gemini-1.5-pro`: optimized for high intelligence tasks, the most powerful Gemini model\n",
- "* `gemini-1.5-flash`: optimized for multi-modal use-cases where speed and cost are important"
+ "* `gemini-1.5-flash`: optimized for multi-modal use-cases where speed and cost are important. This should be your go-to model.\n",
+ "* `gemini-1.5-pro`: optimized for high intelligence tasks, the most powerful Gemini model"
]
},
{
@@ -244,8 +246,8 @@
"outputs": [],
"source": [
"for m in genai.list_models():\n",
- " if 'generateContent' in m.supported_generation_methods:\n",
- " print(m.name)"
+ " if \"generateContent\" in m.supported_generation_methods:\n",
+ " print(m.name)"
]
},
{
@@ -254,7 +256,7 @@
"id": "FTl5NjtrhA0J"
},
"source": [
- "Note: For detailed information about the available models, including their capabilities and rate limits, see [Gemini models](https://ai.google.dev/models/gemini). There are options for requesting [rate limit increases](https://ai.google.dev/docs/increase_quota). The rate limit for Gemini-Pro models is 60 requests per minute (RPM).\n",
+ "Note: For detailed information about the available models, including their capabilities and rate limits, see [Gemini models](https://ai.google.dev/models/gemini). There are options for requesting [rate limit increases](https://ai.google.dev/docs/increase_quota). The rate limit for Gemini-Flash models is 15 requests per minute (RPM) for free ([in supported countries](https://ai.google.dev/gemini-api/docs/billing#is-Gemini-free-in-EEA-UK-CH)).\n",
"\n",
"The `genai` package also supports the PaLM family of models, but only the Gemini models support the generic, multimodal capabilities of the `generateContent` method."
]
@@ -267,7 +269,7 @@
"source": [
"## Generate text from text inputs\n",
"\n",
- "For text-only prompts, use the `gemini-pro` model:"
+    "Always start with the `gemini-1.5-flash` model. It should be sufficient for most of your tasks:"
]
},
{
@@ -278,7 +280,7 @@
},
"outputs": [],
"source": [
- "model = genai.GenerativeModel('gemini-1.5-flash')"
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")"
]
},
{
@@ -287,9 +289,9 @@
"id": "WR_2A_sxk8sK"
},
"source": [
- "The `generate_content` method can handle a wide variety of use cases, including multi-turn chat and multimodal input, depending on what the underlying model supports. The available models only support text and images as input, and text as output.\n",
+ "The `generate_content` method can handle a wide variety of use cases, including multi-turn chat and multimodal input, depending on what the underlying model supports. At the moment, the available models support text, images and videos as input, and text as output.\n",
"\n",
- "In the simplest case, you can pass a prompt string to the GenerativeModel.generate_content
method:"
+ "In the simplest case, you can pass a prompt string to the GenerativeModel.generate_content
method:"
]
},
{
@@ -542,8 +544,8 @@
],
"source": [
"for chunk in response:\n",
- " print(chunk.text)\n",
- " print(\"_\"*80)"
+ " print(chunk.text)\n",
+ " print(\"_\" * 80)"
]
},
{
@@ -639,9 +641,9 @@
],
"source": [
"try:\n",
- " response.text\n",
+ " response.text\n",
"except Exception as e:\n",
- " print(f'{type(e).__name__}: {e}')"
+ " print(f\"{type(e).__name__}: {e}\")"
]
},
{
@@ -702,7 +704,7 @@
"source": [
"import PIL.Image\n",
"\n",
- "img = PIL.Image.open('image.jpg')\n",
+ "img = PIL.Image.open(\"image.jpg\")\n",
"img"
]
},
@@ -723,7 +725,7 @@
},
"outputs": [],
"source": [
- "model = genai.GenerativeModel('gemini-1.5-flash')"
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")"
]
},
{
@@ -770,7 +772,13 @@
},
"outputs": [],
"source": [
- "response = model.generate_content([\"Write a short, engaging blog post based on this picture. It should include a description of the meal in the photo and talk about my journey meal prepping.\", img], stream=True)\n",
+ "response = model.generate_content(\n",
+ " [\n",
+ " \"Write a short, engaging blog post based on this picture. It should include a description of the meal in the photo and talk about my journey meal prepping.\",\n",
+ " img,\n",
+ " ],\n",
+ " stream=True,\n",
+ ")\n",
"response.resolve()"
]
},
@@ -839,7 +847,7 @@
}
],
"source": [
- "model = genai.GenerativeModel('gemini-1.5-flash')\n",
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")\n",
"chat = model.start_chat(history=[])\n",
"chat"
]
@@ -850,7 +858,7 @@
"id": "5odluV7kKbgr"
},
"source": [
- "The `ChatSession.send_message` method returns the same `GenerateContentResponse` type as GenerativeModel.generate_content
. It also appends your message and the response to the chat history:"
+ "The `ChatSession.send_message` method returns the same `GenerateContentResponse` type as GenerativeModel.generate_content
. It also appends your message and the response to the chat history:"
]
},
{
@@ -875,7 +883,9 @@
}
],
"source": [
- "response = chat.send_message(\"In one sentence, explain how a computer works to a young child.\")\n",
+ "response = chat.send_message(\n",
+ " \"In one sentence, explain how a computer works to a young child.\"\n",
+ ")\n",
"to_markdown(response.text)"
]
},
@@ -944,11 +954,13 @@
}
],
"source": [
- "response = chat.send_message(\"Okay, how about a more detailed explanation to a high schooler?\", stream=True)\n",
+ "response = chat.send_message(\n",
+ " \"Okay, how about a more detailed explanation to a high schooler?\", stream=True\n",
+ ")\n",
"\n",
"for chunk in response:\n",
- " print(chunk.text)\n",
- " print(\"_\"*80)"
+ " print(chunk.text)\n",
+ " print(\"_\" * 80)"
]
},
{
@@ -957,7 +969,7 @@
"id": "AwCqtZ6D4kvk"
},
"source": [
- "`genai.protos.Content` objects contain a list of `genai.protos.Part` objects that each contain either a text (string) or inline_data (`genai.protos.Blob`), where a blob contains binary data and a `mime_type`. The chat history is available as a list of `genai.protos.Content` objects in `ChatSession.history`:"
+ "[`genai.protos.Content`](https://github.com/google-gemini/generative-ai-python/blob/main/docs/api/google/generativeai/protos/Content.md) objects contain a list of [`genai.protos.Part`](https://github.com/google-gemini/generative-ai-python/blob/main/docs/api/google/generativeai/protos/Part.md) objects that each contain either a text (string) or inline_data ([`genai.protos.Blob`](https://github.com/google-gemini/generative-ai-python/blob/main/docs/api/google/generativeai/protos/Blob.md)), where a blob contains binary data and a `mime_type`. The chat history is available as a list of `genai.protos.Content` objects in `ChatSession.history`:"
]
},
{
@@ -1022,7 +1034,7 @@
],
"source": [
"for message in chat.history:\n",
- " display(to_markdown(f'**{message.role}**: {message.parts[0].text}'))"
+ " display(to_markdown(f\"**{message.role}**: {message.parts[0].text}\"))"
]
},
{
@@ -1083,13 +1095,24 @@
"model.count_tokens(chat.history)"
]
},
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "vuz9-TWDzdlb"
+ },
+ "source": [
+ "## Advanced use cases\n",
+ "\n",
+ "The following sections discuss advanced use cases and lower-level details of the Python SDK for the Gemini API."
+ ]
+ },
{
"cell_type": "markdown",
"metadata": {
"id": "f9bU0J3vUIbz"
},
"source": [
- "## Use embeddings"
+ "### Use embeddings"
]
},
{
@@ -1133,10 +1156,11 @@
" model=\"models/text-embedding-004\",\n",
" content=\"What is the meaning of life?\",\n",
" task_type=\"retrieval_document\",\n",
- " title=\"Embedding of single string\")\n",
+ " title=\"Embedding of single string\",\n",
+ ")\n",
"\n",
"# 1 input > 1 vector output\n",
- "print(str(result['embedding'])[:50], '... TRIMMED]')"
+ "print(str(result[\"embedding\"])[:50], \"... TRIMMED]\")"
]
},
{
@@ -1171,15 +1195,17 @@
"result = genai.embed_content(\n",
" model=\"models/text-embedding-004\",\n",
" content=[\n",
- " 'What is the meaning of life?',\n",
- " 'How much wood would a woodchuck chuck?',\n",
- " 'How does the brain work?'],\n",
+ " \"What is the meaning of life?\",\n",
+ " \"How much wood would a woodchuck chuck?\",\n",
+ " \"How does the brain work?\",\n",
+ " ],\n",
" task_type=\"retrieval_document\",\n",
- " title=\"Embedding of list of strings\")\n",
+ " title=\"Embedding of list of strings\",\n",
+ ")\n",
"\n",
"# A list of inputs > A list of vectors output\n",
- "for v in result['embedding']:\n",
- " print(str(v)[:50], '... TRIMMED ...')"
+ "for v in result[\"embedding\"]:\n",
+ " print(str(v)[:50], \"... TRIMMED ...\")"
]
},
{
@@ -1235,11 +1261,11 @@
],
"source": [
"result = genai.embed_content(\n",
- " model = 'models/text-embedding-004',\n",
- " content = response.candidates[0].content)\n",
+ " model=\"models/text-embedding-004\", content=response.candidates[0].content\n",
+ ")\n",
"\n",
"# 1 input > 1 vector output\n",
- "print(str(result['embedding'])[:50], '... TRIMMED ...')"
+ "print(str(result[\"embedding\"])[:50], \"... TRIMMED ...\")"
]
},
{
@@ -1307,24 +1333,11 @@
}
],
"source": [
- "result = genai.embed_content(\n",
- " model = 'models/text-embedding-004',\n",
- " content = chat.history)\n",
+ "result = genai.embed_content(model=\"models/text-embedding-004\", content=chat.history)\n",
"\n",
"# 1 input > 1 vector output\n",
- "for i,v in enumerate(result['embedding']):\n",
- " print(str(v)[:50], '... TRIMMED...')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "vuz9-TWDzdlb"
- },
- "source": [
- "## Advanced use cases\n",
- "\n",
- "The following sections discuss advanced use cases and lower-level details of the Python SDK for the Gemini API."
+ "for i, v in enumerate(result[\"embedding\"]):\n",
+ " print(str(v)[:50], \"... TRIMMED...\")"
]
},
{
@@ -1383,7 +1396,7 @@
}
],
"source": [
- "response = model.generate_content('[Questionable prompt here]')\n",
+ "response = model.generate_content(\"[Questionable prompt here]\")\n",
"response.candidates"
]
},
@@ -1450,8 +1463,9 @@
},
"outputs": [],
"source": [
- "response = model.generate_content('[Questionable prompt here]',\n",
- " safety_settings={'HARASSMENT':'block_none'})\n",
+ "response = model.generate_content(\n",
+ " \"[Questionable prompt here]\", safety_settings={\"HARASSMENT\": \"block_none\"}\n",
+ ")\n",
"response.text"
]
},
@@ -1517,20 +1531,22 @@
},
"outputs": [],
"source": [
- "model = genai.GenerativeModel('gemini-1.5-flash')\n",
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")\n",
"response = model.generate_content(\n",
" genai.protos.Content(\n",
- " parts = [\n",
- " genai.protos.Part(text=\"Write a short, engaging blog post based on this picture.\"),\n",
+ " parts=[\n",
+ " genai.protos.Part(\n",
+ " text=\"Write a short, engaging blog post based on this picture.\"\n",
+ " ),\n",
" genai.protos.Part(\n",
" inline_data=genai.protos.Blob(\n",
- " mime_type='image/jpeg',\n",
- " data=pathlib.Path('image.jpg').read_bytes()\n",
+ " mime_type=\"image/jpeg\", data=pathlib.Path(\"image.jpg\").read_bytes()\n",
" )\n",
" ),\n",
" ],\n",
" ),\n",
- " stream=True)"
+ " stream=True,\n",
+ ")"
]
},
{
@@ -1611,11 +1627,13 @@
}
],
"source": [
- "model = genai.GenerativeModel('gemini-1.5-flash')\n",
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")\n",
"\n",
"messages = [\n",
- " {'role':'user',\n",
- " 'parts': [\"Briefly explain how a computer works to a young child.\"]}\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"parts\": [\"Briefly explain how a computer works to a young child.\"],\n",
+ " }\n",
"]\n",
"response = model.generate_content(messages)\n",
"\n",
@@ -1704,11 +1722,16 @@
}
],
"source": [
- "messages.append({'role':'model',\n",
- " 'parts':[response.text]})\n",
+ "messages.append({\"role\": \"model\", \"parts\": [response.text]})\n",
"\n",
- "messages.append({'role':'user',\n",
- " 'parts':[\"Okay, how about a more detailed explanation to a high school student?\"]})\n",
+ "messages.append(\n",
+ " {\n",
+ " \"role\": \"user\",\n",
+ " \"parts\": [\n",
+ " \"Okay, how about a more detailed explanation to a high school student?\"\n",
+ " ],\n",
+ " }\n",
+ ")\n",
"\n",
"response = model.generate_content(messages)\n",
"\n",
@@ -1734,15 +1757,16 @@
},
"outputs": [],
"source": [
- "model = genai.GenerativeModel('gemini-1.5-flash')\n",
+ "model = genai.GenerativeModel(\"gemini-1.5-flash\")\n",
"response = model.generate_content(\n",
- " 'Tell me a story about a magic backpack.',\n",
+ " \"Tell me a story about a magic backpack.\",\n",
" generation_config=genai.types.GenerationConfig(\n",
" # Only one candidate for now.\n",
" candidate_count=1,\n",
- " stop_sequences=['x'],\n",
+ " stop_sequences=[\"x\"],\n",
" max_output_tokens=20,\n",
- " temperature=1.0)\n",
+ " temperature=1.0,\n",
+ " ),\n",
")"
]
},
@@ -1771,7 +1795,7 @@
"text = response.text\n",
"\n",
"if response.candidates[0].finish_reason.name == \"MAX_TOKENS\":\n",
- " text += '...'\n",
+ " text += \"...\"\n",
"\n",
"to_markdown(text)"
]
diff --git a/site/en/gemini-api/docs/get-started/rest.ipynb b/site/en/gemini-api/docs/get-started/rest.ipynb
index 6114babea..6b00cc14f 100644
--- a/site/en/gemini-api/docs/get-started/rest.ipynb
+++ b/site/en/gemini-api/docs/get-started/rest.ipynb
@@ -86,6 +86,7 @@
"id": "ywtfO3mO26KO"
},
"source": [
+ "## Prerequisites\n",
"### Set up your API key\n",
"\n",
"To use the Gemini API, you'll need an API key. If you don't already have one, create a key in Google AI Studio.\n",
@@ -99,9 +100,9 @@
"id": "4EsvRU-s3FJx"
},
"source": [
- "In Colab, add the key to the secrets manager under the \"🔑\" in the left panel. Give it the name `GOOGLE_API_KEY`. You can then add it as an environment variable to pass the key in your curl call.\n",
+ "In Colab, add the key to the secrets manager under the \"🔑\" in the left panel. Give it the name `GEMINI_API_KEY`. You can then add it as an environment variable to pass the key in your curl call.\n",
"\n",
- "In a terminal, you can just run `GOOGLE_API_KEY=\"Your API Key\"`."
+ "In a terminal, you can just run `GEMINI_API_KEY=\"Your API Key\"`."
]
},
{
@@ -115,7 +116,7 @@
"import os\n",
"from google.colab import userdata\n",
"\n",
- "os.environ['GOOGLE_API_KEY'] = userdata.get('GOOGLE_API_KEY')"
+ "os.environ['GEMINI_API_KEY'] = userdata.get('GEMINI_API_KEY')"
]
},
{
@@ -136,7 +137,7 @@
"### Text-only input\n",
"\n",
"Use the `generateContent` method\n",
- "to generate a response from the model given an input message. If the input contains only text, use the `gemini-pro` model."
+ "to generate a response from the model given an input message. Always start with the `gemini-1.5-flash` model."
]
},
{
@@ -209,7 +210,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
@@ -319,7 +320,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${GOOGLE_API_KEY} \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=${GEMINI_API_KEY} \\\n",
" -H 'Content-Type: application/json' \\\n",
" -d @request.json 2> /dev/null | grep \"text\""
]
@@ -352,7 +353,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
@@ -403,7 +404,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=$GOOGLE_API_KEY \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent?key=$GEMINI_API_KEY \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
@@ -470,7 +471,7 @@
}
],
"source": [
- "!curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:streamGenerateContent?alt=sse&key=${GOOGLE_API_KEY}\" \\\n",
+    "!curl \"https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:streamGenerateContent?alt=sse&key=${GEMINI_API_KEY}\" \\\n",
" -H 'Content-Type: application/json' \\\n",
" --no-buffer \\\n",
" -d '{ \"contents\":[{\"parts\":[{\"text\": \"Write long a story about a magic backpack.\"}]}]}' \\\n",
@@ -518,7 +519,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:countTokens?key=$GOOGLE_API_KEY \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:countTokens?key=$GEMINI_API_KEY \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
@@ -587,7 +588,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/embedding-001:embedContent?key=$GOOGLE_API_KEY \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/embedding-001:embedContent?key=$GEMINI_API_KEY \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
@@ -623,7 +624,7 @@
],
"source": [
"%%bash\n",
- "curl https://generativelanguage.googleapis.com/v1beta/models/embedding-001:batchEmbedContents?key=$GOOGLE_API_KEY \\\n",
+ "curl https://generativelanguage.googleapis.com/v1beta/models/embedding-001:batchEmbedContents?key=$GEMINI_API_KEY \\\n",
" -H 'Content-Type: application/json' \\\n",
" -X POST \\\n",
" -d '{\n",
@@ -684,7 +685,7 @@
}
],
"source": [
- "!curl https://generativelanguage.googleapis.com/v1beta/models/gemini-pro?key=$GOOGLE_API_KEY"
+ "!curl https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash?key=$GEMINI_API_KEY"
]
},
{
@@ -813,7 +814,7 @@
}
],
"source": [
- "!curl https://generativelanguage.googleapis.com/v1beta/models?key=$GOOGLE_API_KEY"
+ "!curl https://generativelanguage.googleapis.com/v1beta/models?key=$GEMINI_API_KEY"
]
}
],