The Gemini API can generate text output from a variety of inputs, including text, images, video, and audio, using Gemini models.
Here's a basic example that takes a single text input:
Python
from google import genai
client = genai.Client()
response = client.models.generate_content(
model="gemini-2.5-flash",
contents="How does AI work?"
)
print(response.text)
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const response = await ai.models.generateContent({
model: "gemini-2.5-flash",
contents: "How does AI work?",
});
console.log(response.text);
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-2.5-flash",
genai.Text("Explain how AI works in a few words"),
nil,
)
fmt.Println(result.Text())
}
REST
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"parts": [
{
"text": "How does AI work?"
}
]
}
]
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const payload = {
contents: [
{
parts: [
{ text: 'How does AI work?' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
const data = JSON.parse(response);
const content = data['candidates'][0]['content']['parts'][0]['text'];
console.log(content);
}
Thinking with Gemini 2.5
The 2.5 Flash and Pro models have "thinking" enabled by default to improve quality, which can take longer to run and increase token usage.
When using 2.5 Flash, you can disable thinking by setting the thinking budget to zero.
For more details, see the thinking guide.
Python
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model="gemini-2.5-flash",
contents="How does AI work?",
config=types.GenerateContentConfig(
thinking_config=types.ThinkingConfig(thinking_budget=0) # Disables thinking
),
)
print(response.text)
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const response = await ai.models.generateContent({
model: "gemini-2.5-flash",
contents: "How does AI work?",
config: {
thinkingConfig: {
thinkingBudget: 0, // Disables thinking
},
}
});
console.log(response.text);
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-2.5-flash",
genai.Text("How does AI work?"),
&genai.GenerateContentConfig{
ThinkingConfig: &genai.ThinkingConfig{
ThinkingBudget: int32(0), // Disables thinking
},
},
)
fmt.Println(result.Text())
}
REST
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"parts": [
{
"text": "How does AI work?"
}
]
}
],
"generationConfig": {
"thinkingConfig": {
"thinkingBudget": 0
}
}
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const payload = {
contents: [
{
parts: [
{ text: 'How does AI work?' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
const data = JSON.parse(response);
const content = data['candidates'][0]['content']['parts'][0]['text'];
console.log(content);
}
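The examples above disable thinking entirely. If you would rather keep thinking enabled but limit how much the model can spend on it, you can pass a positive budget instead. A minimal Python sketch, assuming a budget of 1024 tokens (an illustrative value, not a recommendation):
from google import genai
from google.genai import types

client = genai.Client()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents="How does AI work?",
    config=types.GenerateContentConfig(
        # Cap thinking at roughly 1024 tokens (illustrative value).
        thinking_config=types.ThinkingConfig(thinking_budget=1024)
    ),
)
print(response.text)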
System instructions and other configurations
You can steer the behavior of Gemini models with system instructions. To do so, pass a GenerateContentConfig object.
Python
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model="gemini-2.5-flash",
config=types.GenerateContentConfig(
system_instruction="You are a cat. Your name is Neko."),
contents="Hello there"
)
print(response.text)
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const response = await ai.models.generateContent({
model: "gemini-2.5-flash",
contents: "Hello there",
config: {
systemInstruction: "You are a cat. Your name is Neko.",
},
});
console.log(response.text);
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
config := &genai.GenerateContentConfig{
SystemInstruction: genai.NewContentFromText("You are a cat. Your name is Neko.", genai.RoleUser),
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-2.5-flash",
genai.Text("Hello there"),
config,
)
fmt.Println(result.Text())
}
REST
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-d '{
"system_instruction": {
"parts": [
{
"text": "You are a cat. Your name is Neko."
}
]
},
"contents": [
{
"parts": [
{
"text": "Hello there"
}
]
}
]
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const systemInstruction = {
parts: [{
text: 'You are a cat. Your name is Neko.'
}]
};
const payload = {
systemInstruction,
contents: [
{
parts: [
{ text: 'Hello there' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
const data = JSON.parse(response);
const content = data['candidates'][0]['content']['parts'][0]['text'];
console.log(content);
}
The GenerateContentConfig object also lets you override default generation parameters, such as temperature.
Python
from google import genai
from google.genai import types
client = genai.Client()
response = client.models.generate_content(
model="gemini-2.5-flash",
contents=["Explain how AI works"],
config=types.GenerateContentConfig(
temperature=0.1
)
)
print(response.text)
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const response = await ai.models.generateContent({
model: "gemini-2.5-flash",
contents: "Explain how AI works",
config: {
temperature: 0.1,
},
});
console.log(response.text);
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
temp := float32(0.9)
topP := float32(0.5)
topK := float32(20.0)
config := &genai.GenerateContentConfig{
Temperature: &temp,
TopP: &topP,
TopK: &topK,
ResponseMIMEType: "application/json",
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-2.5-flash",
genai.Text("What is the average size of a swallow?"),
config,
)
fmt.Println(result.Text())
}
REST
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"parts": [
{
"text": "Explain how AI works"
}
]
}
],
"generationConfig": {
"stopSequences": [
"Title"
],
"temperature": 1.0,
"topP": 0.8,
"topK": 10
}
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const generationConfig = {
temperature: 1,
topP: 0.95,
topK: 40,
responseMimeType: 'text/plain',
};
const payload = {
generationConfig,
contents: [
{
parts: [
{ text: 'Explain how AI works in a few words' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
const data = JSON.parse(response);
const content = data['candidates'][0]['content']['parts'][0]['text'];
console.log(content);
}
See GenerateContentConfig in our API reference for a complete list of configurable parameters and their descriptions.
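As an illustration, several of these parameters can be combined in a single request. The following Python sketch mirrors the REST example above; the specific values are arbitrary:
from google import genai
from google.genai import types

client = genai.Client()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents="Explain how AI works",
    config=types.GenerateContentConfig(
        temperature=1.0,           # sampling randomness
        top_p=0.8,                 # nucleus sampling threshold
        top_k=10,                  # limit sampling to the 10 most likely tokens
        max_output_tokens=512,     # cap the length of the response
        stop_sequences=["Title"],  # stop generating when this string appears
    ),
)
print(response.text)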
Multimodal inputs
The Gemini API supports multimodal inputs, letting you combine text with media files. The following example shows how to provide an image:
Python
from PIL import Image
from google import genai
client = genai.Client()
image = Image.open("/path/to/organ.png")
response = client.models.generate_content(
model="gemini-2.5-flash",
contents=[image, "Tell me about this instrument"]
)
print(response.text)
JavaScript
import {
GoogleGenAI,
createUserContent,
createPartFromUri,
} from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const image = await ai.files.upload({
file: "/path/to/organ.png",
});
const response = await ai.models.generateContent({
model: "gemini-2.5-flash",
contents: [
createUserContent([
"Tell me about this instrument",
createPartFromUri(image.uri, image.mimeType),
]),
],
});
console.log(response.text);
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
imagePath := "/path/to/organ.jpg"
imgData, _ := os.ReadFile(imagePath)
parts := []*genai.Part{
genai.NewPartFromText("Tell me about this instrument"),
&genai.Part{
InlineData: &genai.Blob{
MIMEType: "image/jpeg",
Data: imgData,
},
},
}
contents := []*genai.Content{
genai.NewContentFromParts(parts, genai.RoleUser),
}
result, _ := client.Models.GenerateContent(
ctx,
"gemini-2.5-flash",
contents,
nil,
)
fmt.Println(result.Text())
}
REST
# Use a temporary file to hold the base64 encoded image data
TEMP_B64=$(mktemp)
trap 'rm -f "$TEMP_B64"' EXIT
# IMG_PATH and B64FLAGS are assumed to be set beforehand (e.g. B64FLAGS="-w0" on Linux, empty on macOS)
base64 $B64FLAGS $IMG_PATH > "$TEMP_B64"
# Use a temporary file to hold the JSON payload
TEMP_JSON=$(mktemp)
trap 'rm -f "$TEMP_JSON"' EXIT
cat > "$TEMP_JSON" << EOF
{
"contents": [
{
"parts": [
{
"text": "Tell me about this instrument"
},
{
"inline_data": {
"mime_type": "image/jpeg",
"data": "$(cat "$TEMP_B64")"
}
}
]
}
]
}
EOF
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d "@$TEMP_JSON"
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const imageUrl = 'http://image/url';
const image = getImageData(imageUrl);
const payload = {
contents: [
{
parts: [
{ inlineData: image },
{ text: 'Tell me about this instrument' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
const data = JSON.parse(response);
const content = data['candidates'][0]['content']['parts'][0]['text'];
console.log(content);
}
function getImageData(url) {
const blob = UrlFetchApp.fetch(url).getBlob();
return {
mimeType: blob.getContentType(),
data: Utilities.base64Encode(blob.getBytes())
};
}
For other ways of providing images and more advanced image processing, see our image understanding guide. The API also supports document, video, and audio inputs and understanding.
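For example, instead of opening the file with PIL or uploading it through the File API, you can attach raw image bytes as an inline part. A minimal Python sketch, assuming a local PNG file at a placeholder path:
from google import genai
from google.genai import types

client = genai.Client()

# Read the image as raw bytes and attach it inline alongside the text prompt.
with open("/path/to/organ.png", "rb") as f:
    image_bytes = f.read()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=[
        types.Part.from_bytes(data=image_bytes, mime_type="image/png"),
        "Tell me about this instrument",
    ],
)
print(response.text)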
Streaming responses
By default, the model returns a response only after the entire generation process is complete.
For more fluid interactions, use streaming to receive GenerateContentResponse instances incrementally as they are generated.
Python
from google import genai
client = genai.Client()
response = client.models.generate_content_stream(
model="gemini-2.5-flash",
contents=["Explain how AI works"]
)
for chunk in response:
print(chunk.text, end="")
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const response = await ai.models.generateContentStream({
model: "gemini-2.5-flash",
contents: "Explain how AI works",
});
for await (const chunk of response) {
console.log(chunk.text);
}
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
stream := client.Models.GenerateContentStream(
ctx,
"gemini-2.5-flash",
genai.Text("Write a story about a magic backpack."),
nil,
)
for chunk, _ := range stream {
part := chunk.Candidates[0].Content.Parts[0]
fmt.Print(part.Text)
}
}
REST
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse" \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
--no-buffer \
-d '{
"contents": [
{
"parts": [
{
"text": "Explain how AI works"
}
]
}
]
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const payload = {
contents: [
{
parts: [
{ text: 'Explain how AI works' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:streamGenerateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
// streamGenerateContent (without ?alt=sse) returns a JSON array of response chunks.
const data = JSON.parse(response.getContentText());
const content = data
.map((chunk) => chunk['candidates'][0]['content']['parts'][0]['text'])
.join('');
console.log(content);
}
Multi-turn conversations (chat)
Our SDKs provide functionality for collecting multiple rounds of prompts and responses into a chat, giving you an easy way to keep track of the conversation history.
Python
from google import genai
client = genai.Client()
chat = client.chats.create(model="gemini-2.5-flash")
response = chat.send_message("I have 2 dogs in my house.")
print(response.text)
response = chat.send_message("How many paws are in my house?")
print(response.text)
for message in chat.get_history():
print(f'role - {message.role}',end=": ")
print(message.parts[0].text)
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const chat = ai.chats.create({
model: "gemini-2.5-flash",
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
const response1 = await chat.sendMessage({
message: "I have 2 dogs in my house.",
});
console.log("Chat response 1:", response1.text);
const response2 = await chat.sendMessage({
message: "How many paws are in my house?",
});
console.log("Chat response 2:", response2.text);
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
history := []*genai.Content{
genai.NewContentFromText("Hi nice to meet you! I have 2 dogs in my house.", genai.RoleUser),
genai.NewContentFromText("Great to meet you. What would you like to know?", genai.RoleModel),
}
chat, _ := client.Chats.Create(ctx, "gemini-2.5-flash", nil, history)
res, _ := chat.SendMessage(ctx, genai.Part{Text: "How many paws are in my house?"})
if len(res.Candidates) > 0 {
fmt.Println(res.Candidates[0].Content.Parts[0].Text)
}
}
REST
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"role": "user",
"parts": [
{
"text": "Hello"
}
]
},
{
"role": "model",
"parts": [
{
"text": "Great to meet you. What would you like to know?"
}
]
},
{
"role": "user",
"parts": [
{
"text": "I have two dogs in my house. How many paws are in my house?"
}
]
}
]
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const payload = {
contents: [
{
role: 'user',
parts: [
{ text: 'Hello' },
],
},
{
role: 'model',
parts: [
{ text: 'Great to meet you. What would you like to know?' },
],
},
{
role: 'user',
parts: [
{ text: 'I have two dogs in my house. How many paws are in my house?' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
const data = JSON.parse(response);
const content = data['candidates'][0]['content']['parts'][0]['text'];
console.log(content);
}
Streaming can also be used for multi-turn conversations.
Python
from google import genai
client = genai.Client()
chat = client.chats.create(model="gemini-2.5-flash")
response = chat.send_message_stream("I have 2 dogs in my house.")
for chunk in response:
print(chunk.text, end="")
response = chat.send_message_stream("How many paws are in my house?")
for chunk in response:
print(chunk.text, end="")
for message in chat.get_history():
print(f'role - {message.role}', end=": ")
print(message.parts[0].text)
JavaScript
import { GoogleGenAI } from "@google/genai";
const ai = new GoogleGenAI({});
async function main() {
const chat = ai.chats.create({
model: "gemini-2.5-flash",
history: [
{
role: "user",
parts: [{ text: "Hello" }],
},
{
role: "model",
parts: [{ text: "Great to meet you. What would you like to know?" }],
},
],
});
const stream1 = await chat.sendMessageStream({
message: "I have 2 dogs in my house.",
});
for await (const chunk of stream1) {
console.log(chunk.text);
console.log("_".repeat(80));
}
const stream2 = await chat.sendMessageStream({
message: "How many paws are in my house?",
});
for await (const chunk of stream2) {
console.log(chunk.text);
console.log("_".repeat(80));
}
}
await main();
Go
package main
import (
"context"
"fmt"
"os"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
client, err := genai.NewClient(ctx, nil)
if err != nil {
log.Fatal(err)
}
history := []*genai.Content{
genai.NewContentFromText("Hi nice to meet you! I have 2 dogs in my house.", genai.RoleUser),
genai.NewContentFromText("Great to meet you. What would you like to know?", genai.RoleModel),
}
chat, _ := client.Chats.Create(ctx, "gemini-2.5-flash", nil, history)
stream := chat.SendMessageStream(ctx, genai.Part{Text: "How many paws are in my house?"})
for chunk, _ := range stream {
part := chunk.Candidates[0].Content.Parts[0]
fmt.Print(part.Text)
}
}
REST
curl https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse \
-H "x-goog-api-key: $GEMINI_API_KEY" \
-H 'Content-Type: application/json' \
-X POST \
-d '{
"contents": [
{
"role": "user",
"parts": [
{
"text": "Hello"
}
]
},
{
"role": "model",
"parts": [
{
"text": "Great to meet you. What would you like to know?"
}
]
},
{
"role": "user",
"parts": [
{
"text": "I have two dogs in my house. How many paws are in my house?"
}
]
}
]
}'
Apps Script
// See https://developers.google.com/apps-script/guides/properties
// for instructions on how to set the API key.
const apiKey = PropertiesService.getScriptProperties().getProperty('GEMINI_API_KEY');
function main() {
const payload = {
contents: [
{
role: 'user',
parts: [
{ text: 'Hello' },
],
},
{
role: 'model',
parts: [
{ text: 'Great to meet you. What would you like to know?' },
],
},
{
role: 'user',
parts: [
{ text: 'I have two dogs in my house. How many paws are in my house?' },
],
},
],
};
const url = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:streamGenerateContent';
const options = {
method: 'POST',
contentType: 'application/json',
headers: {
'x-goog-api-key': apiKey,
},
payload: JSON.stringify(payload)
};
const response = UrlFetchApp.fetch(url, options);
// streamGenerateContent (without ?alt=sse) returns a JSON array of response chunks.
const data = JSON.parse(response.getContentText());
const content = data
.map((chunk) => chunk['candidates'][0]['content']['parts'][0]['text'])
.join('');
console.log(content);
}
Supported models
All models in the Gemini family support text generation. To learn more about the models and their capabilities, see the Models page.
Best practices
Prompting tips
For basic text generation, a zero-shot prompt often suffices without the need for examples, system instructions, or specific formatting.
For more tailored outputs:
- Use system instructions to guide the model.
- Provide a few example inputs and outputs to guide the model. This is often called few-shot prompting (see the sketch after this list).
For other tips, see our prompt engineering guide.
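A minimal Python sketch of few-shot prompting, with made-up example pairs placed directly in the prompt text:
from google import genai

client = genai.Client()

# A few-shot prompt: two worked examples followed by the new input.
prompt = """Convert each city to its country.

City: Paris
Country: France

City: Kyoto
Country: Japan

City: Toronto
Country:"""

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents=prompt,
)
print(response.text)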
Structured output
In some cases, you may need structured output, such as JSON. To learn how, see our structured output guide.
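As a quick illustration, the Python SDK lets you request JSON that conforms to a schema (here a made-up Pydantic model) via the response_mime_type and response_schema settings:
from google import genai
from google.genai import types
from pydantic import BaseModel

# A hypothetical schema describing the structured response we want.
class Recipe(BaseModel):
    recipe_name: str
    ingredients: list[str]

client = genai.Client()

response = client.models.generate_content(
    model="gemini-2.5-flash",
    contents="Give me a simple cookie recipe.",
    config=types.GenerateContentConfig(
        response_mime_type="application/json",
        response_schema=Recipe,
    ),
)
print(response.text)  # JSON string matching the Recipe schema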
What's next
- Try the Gemini API getting started Colab.
- Explore Gemini's image, video, audio, and document understanding capabilities.
- Learn about multimodal file prompting strategies.