The Google Gemini adapter provides access to Google's Gemini models, including text generation, image generation with Imagen, and experimental text-to-speech.
npm install @tanstack/ai-gemini
npm install @tanstack/ai-gemini
import { chat } from "@tanstack/ai";
import { geminiText } from "@tanstack/ai-gemini";
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages: [{ role: "user", content: "Hello!" }],
});
import { chat } from "@tanstack/ai";
import { geminiText } from "@tanstack/ai-gemini";
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages: [{ role: "user", content: "Hello!" }],
});
import { chat } from "@tanstack/ai";
import { createGeminiChat } from "@tanstack/ai-gemini";
const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, {
// ... your config options
});
const stream = chat({
adapter: adapter("gemini-2.5-pro"),
messages: [{ role: "user", content: "Hello!" }],
});
import { chat } from "@tanstack/ai";
import { createGeminiChat } from "@tanstack/ai-gemini";
const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, {
// ... your config options
});
const stream = chat({
adapter: adapter("gemini-2.5-pro"),
messages: [{ role: "user", content: "Hello!" }],
});
import { createGeminiChat, type GeminiChatConfig } from "@tanstack/ai-gemini";
const config: Omit<GeminiChatConfig, 'apiKey'> = {
baseURL: "https://generativelanguage.googleapis.com/v1beta", // Optional
};
const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, config);
import { createGeminiChat, type GeminiChatConfig } from "@tanstack/ai-gemini";
const config: Omit<GeminiChatConfig, 'apiKey'> = {
baseURL: "https://generativelanguage.googleapis.com/v1beta", // Optional
};
const adapter = createGeminiChat(process.env.GEMINI_API_KEY!, config);
import { chat, toServerSentEventsResponse } from "@tanstack/ai";
import { geminiText } from "@tanstack/ai-gemini";
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages,
});
return toServerSentEventsResponse(stream);
}
import { chat, toServerSentEventsResponse } from "@tanstack/ai";
import { geminiText } from "@tanstack/ai-gemini";
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages,
});
return toServerSentEventsResponse(stream);
}
import { chat, toolDefinition } from "@tanstack/ai";
import { geminiText } from "@tanstack/ai-gemini";
import { z } from "zod";
const getCalendarEventsDef = toolDefinition({
name: "get_calendar_events",
description: "Get calendar events for a date",
inputSchema: z.object({
date: z.string(),
}),
});
const getCalendarEvents = getCalendarEventsDef.server(async ({ date }) => {
// Fetch calendar events
return { events: [] };
});
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages,
tools: [getCalendarEvents],
});
import { chat, toolDefinition } from "@tanstack/ai";
import { geminiText } from "@tanstack/ai-gemini";
import { z } from "zod";
const getCalendarEventsDef = toolDefinition({
name: "get_calendar_events",
description: "Get calendar events for a date",
inputSchema: z.object({
date: z.string(),
}),
});
const getCalendarEvents = getCalendarEventsDef.server(async ({ date }) => {
// Fetch calendar events
return { events: [] };
});
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages,
tools: [getCalendarEvents],
});
Gemini supports various model-specific options:
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages,
modelOptions: {
maxOutputTokens: 2048,
temperature: 0.7,
topP: 0.9,
topK: 40,
stopSequences: ["END"],
},
});
const stream = chat({
adapter: geminiText("gemini-2.5-pro"),
messages,
modelOptions: {
maxOutputTokens: 2048,
temperature: 0.7,
topP: 0.9,
topK: 40,
stopSequences: ["END"],
},
});
Enable thinking for models that support it:
modelOptions: {
thinking: {
includeThoughts: true,
},
}
modelOptions: {
thinking: {
includeThoughts: true,
},
}
Configure structured output format:
modelOptions: {
responseMimeType: "application/json",
}
modelOptions: {
responseMimeType: "application/json",
}
Summarize long text content:
import { summarize } from "@tanstack/ai";
import { geminiSummarize } from "@tanstack/ai-gemini";
const result = await summarize({
adapter: geminiSummarize("gemini-2.5-pro"),
text: "Your long text to summarize...",
maxLength: 100,
style: "concise", // "concise" | "bullet-points" | "paragraph"
});
console.log(result.summary);
import { summarize } from "@tanstack/ai";
import { geminiSummarize } from "@tanstack/ai-gemini";
const result = await summarize({
adapter: geminiSummarize("gemini-2.5-pro"),
text: "Your long text to summarize...",
maxLength: 100,
style: "concise", // "concise" | "bullet-points" | "paragraph"
});
console.log(result.summary);
Generate images with Imagen:
import { generateImage } from "@tanstack/ai";
import { geminiImage } from "@tanstack/ai-gemini";
const result = await generateImage({
adapter: geminiImage("imagen-3.0-generate-002"),
prompt: "A futuristic cityscape at sunset",
numberOfImages: 1,
});
console.log(result.images);
import { generateImage } from "@tanstack/ai";
import { geminiImage } from "@tanstack/ai-gemini";
const result = await generateImage({
adapter: geminiImage("imagen-3.0-generate-002"),
prompt: "A futuristic cityscape at sunset",
numberOfImages: 1,
});
console.log(result.images);
const result = await generateImage({
adapter: geminiImage("imagen-3.0-generate-002"),
prompt: "...",
modelOptions: {
aspectRatio: "16:9", // "1:1" | "3:4" | "4:3" | "9:16" | "16:9"
personGeneration: "DONT_ALLOW", // Control person generation
safetyFilterLevel: "BLOCK_SOME", // Safety filtering
},
});
const result = await generateImage({
adapter: geminiImage("imagen-3.0-generate-002"),
prompt: "...",
modelOptions: {
aspectRatio: "16:9", // "1:1" | "3:4" | "4:3" | "9:16" | "16:9"
personGeneration: "DONT_ALLOW", // Control person generation
safetyFilterLevel: "BLOCK_SOME", // Safety filtering
},
});
Note: Gemini TTS is experimental and may require the Live API for full functionality.
Generate speech from text:
import { generateSpeech } from "@tanstack/ai";
import { geminiSpeech } from "@tanstack/ai-gemini";
const result = await generateSpeech({
adapter: geminiSpeech("gemini-2.5-flash-preview-tts"),
text: "Hello from Gemini TTS!",
});
console.log(result.audio); // Base64 encoded audio
import { generateSpeech } from "@tanstack/ai";
import { geminiSpeech } from "@tanstack/ai-gemini";
const result = await generateSpeech({
adapter: geminiSpeech("gemini-2.5-flash-preview-tts"),
text: "Hello from Gemini TTS!",
});
console.log(result.audio); // Base64 encoded audio
Set your API key in environment variables:
GEMINI_API_KEY=your-api-key-here
# or
GOOGLE_API_KEY=your-api-key-here
GEMINI_API_KEY=your-api-key-here
# or
GOOGLE_API_KEY=your-api-key-here
Creates a Gemini text/chat adapter using the API key read from environment variables (`GEMINI_API_KEY` or `GOOGLE_API_KEY`).
Returns: A Gemini text adapter instance.
Creates a Gemini text/chat adapter with an explicit API key.
Parameters: `apiKey` — the Gemini API key; `config` — optional adapter configuration (all `GeminiChatConfig` options except `apiKey`).
Returns: A Gemini text adapter instance.
Creates a Gemini summarization adapter using the API key read from environment variables (`GEMINI_API_KEY` or `GOOGLE_API_KEY`).
Returns: A Gemini summarize adapter instance.
Creates a Gemini summarization adapter with an explicit API key.
Returns: A Gemini summarize adapter instance.
Creates a Gemini image generation adapter using the API key read from environment variables (`GEMINI_API_KEY` or `GOOGLE_API_KEY`).
Returns: A Gemini image adapter instance.
Creates a Gemini image generation adapter with an explicit API key.
Returns: A Gemini image adapter instance.
Creates a Gemini TTS adapter using the API key read from environment variables (`GEMINI_API_KEY` or `GOOGLE_API_KEY`).
Returns: A Gemini TTS adapter instance.
Creates a Gemini TTS adapter with an explicit API key.
Returns: A Gemini TTS adapter instance.
