Gemini 原生 API
我们的 Chat 服务同样支持原生 Google Gemini API 格式,允许您直接使用官方 Google GenAI 库。此 API 专为偏好使用 Google 原生 SDK 而非 OpenAI 兼容格式的开发者设计。
🌟 核心特性
- ✅ 直接使用官方 Google GenAI SDK
- ✅ 完全兼容 Gemini API 格式
- ✅ 支持流式和非流式响应
- ✅ 通过我们的服务访问 Gemini 模型
📋 可用端点
| 端点 | 方法 | 描述 |
|---|---|---|
| `/chat/gemini/{apiVersion}/models/{model}:generateContent` | POST | 生成内容(非流式) |
| `/chat/gemini/{apiVersion}/models/{model}:streamGenerateContent` | POST | 生成内容(流式 SSE) |
💡 快速示例
- Node.js
- Python
- Go
- cURL
import { GoogleGenAI } from '@google/genai';

// Configuration: point the official Google GenAI SDK at our
// Gemini-compatible gateway instead of Google's default endpoint.
const API_KEY = 'your-api-key';
const API_VERSION = 'v1beta';
const BASE_URL = 'https://api.mountsea.ai/chat/gemini';

const client = new GoogleGenAI({
  apiKey: API_KEY,
  apiVersion: API_VERSION,
  httpOptions: {
    baseUrl: BASE_URL,
    headers: {
      // Our gateway authenticates with a Bearer token.
      "Authorization": `Bearer ${API_KEY}`,
    },
  },
});

// Generate content (non-streaming)
const response = await client.models.generateContent({
  model: 'gemini-3-flash',
  contents: [
    {
      role: 'user',
      parts: [{ text: 'Hello! Tell me a joke.' }]
    }
  ],
  config: {
    temperature: 1,
    maxOutputTokens: 1024,
  }
});
console.log(response.text);

// Generate content (streaming).
// FIX: the @google/genai SDK method is `generateContentStream`;
// `streamGenerateContent` is the raw REST endpoint name, not an SDK method.
const stream = await client.models.generateContentStream({
  model: 'gemini-3-flash',
  contents: [
    {
      role: 'user',
      parts: [{ text: 'Write a short story about a robot.' }]
    }
  ],
  config: {
    temperature: 0.8,
    maxOutputTokens: 2048,
  }
});
// Each chunk is a GenerateContentResponse; `text` may be undefined on
// non-text chunks, hence the `|| ''` fallback.
for await (const chunk of stream) {
  process.stdout.write(chunk.text || '');
}
npm install @google/genai
from google import genai

# Configuration: point the official google-genai SDK at our
# Gemini-compatible gateway instead of Google's default endpoint.
API_KEY = "your-api-key"
API_VERSION = "v1beta"
BASE_URL = "https://api.mountsea.ai/chat/gemini"

client = genai.Client(
    api_key=API_KEY,
    http_options={
        "base_url": BASE_URL,
        # FIX: API_VERSION was previously defined but never passed, so the
        # client silently used the SDK default. Mirror the Node.js example.
        "api_version": API_VERSION,
        "headers": {
            # Our gateway authenticates with a Bearer token.
            "Authorization": f"Bearer {API_KEY}",
        },
    },
)

# Generate content (non-streaming)
response = client.models.generate_content(
    model="gemini-3-flash",
    contents=[
        {
            "role": "user",
            "parts": [{"text": "Hello! Tell me a joke."}]
        }
    ],
    config={
        "temperature": 1,
        "max_output_tokens": 1024,
    }
)
print(response.text)

# Generate content (streaming): yields partial GenerateContentResponse
# chunks as the model produces them.
stream = client.models.generate_content_stream(
    model="gemini-3-flash",
    contents=[
        {
            "role": "user",
            "parts": [{"text": "Write a short story about a robot."}]
        }
    ],
    config={
        "temperature": 0.8,
        "max_output_tokens": 2048,
    }
)
for chunk in stream:
    print(chunk.text, end="")
pip install google-genai
package main
import (
"context"
"fmt"
"google.golang.org/genai"
)
func main() {
ctx := context.Background()
apiKey := "your-api-key"
baseURL := "https://api.mountsea.ai/chat/gemini"
client, _ := genai.NewClient(ctx, &genai.ClientConfig{
APIKey: apiKey,
BaseURL: baseURL,
Headers: map[string]string{
"Authorization": "Bearer " + apiKey,
},
})
defer client.Close()
// Generate content (non-streaming)
model := client.GenerativeModel("gemini-3-flash")
model.Temperature = genai.Ptr(float32(1.0))
model.MaxOutputTokens = genai.Ptr(int32(1024))
resp, _ := model.GenerateContent(ctx, genai.Text("Hello! Tell me a joke."))
for _, part := range resp.Candidates[0].Content.Parts {
fmt.Println(part)
}
}
iter := model.GenerateContentStream(ctx, genai.Text("Write a short story about a robot."))
for {
resp, err := iter.Next()
if err == iterator.Done {
break
}
for _, part := range resp.Candidates[0].Content.Parts {
fmt.Print(part)
}
}
go get google.golang.org/genai
# Non-streaming
# Replace your-api-key with your real key. Auth is accepted either via the
# ?key= query parameter or the Authorization: Bearer header (both shown).
curl -X POST "https://api.mountsea.ai/chat/gemini/v1beta/models/gemini-3-flash:generateContent?key=your-api-key" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer your-api-key" \
-d '{
"contents": [
{
"role": "user",
"parts": [{"text": "Hello! Tell me a joke."}]
}
],
"generationConfig": {
"temperature": 1,
"maxOutputTokens": 1024
}
}'
# Streaming (SSE)
# alt=sse makes the endpoint emit Server-Sent Events ("data: {...}" lines)
# instead of a single JSON array of chunks.
curl -X POST "https://api.mountsea.ai/chat/gemini/v1beta/models/gemini-3-flash:streamGenerateContent?alt=sse&key=your-api-key" \
-H "Content-Type: application/json" \
-H "Authorization: Bearer your-api-key" \
-d '{
"contents": [
{
"role": "user",
"parts": [{"text": "Write a story"}]
}
]
}'
📤 响应格式
非流式响应
{
"candidates": [
{
"content": {
"parts": [
{
"text": "Why don't scientists trust atoms? Because they make up everything!"
}
],
"role": "model"
},
"finishReason": "STOP",
"safetyRatings": [...]
}
],
"usageMetadata": {
"promptTokenCount": 10,
"candidatesTokenCount": 15,
"totalTokenCount": 25
}
}
流式响应 (SSE)
data: {"candidates":[{"content":{"parts":[{"text":"Why"}],"role":"model"}}]}
data: {"candidates":[{"content":{"parts":[{"text":" don't"}],"role":"model"}}]}
data: {"candidates":[{"content":{"parts":[{"text":" scientists"}],"role":"model"}}]}
...
🔧 配置选项
config / generationConfig 对象支持以下参数:
| 参数 | 类型 | 描述 |
|---|---|---|
| `temperature` | number | 控制随机性(0-2) |
| `maxOutputTokens` | number | 响应的最大 token 数 |
| `topP` | number | Nucleus 采样参数 |
| `topK` | number | Top-K 采样参数 |
| `systemInstruction` | object | 系统提示配置 |
| `tools` | array | 函数调用工具 |
使用系统指令
- Node.js
- Python
// System-instruction example: steer the model's persona without adding
// the prompt to the conversation history itself.
const systemInstruction = {
  role: 'user',
  parts: [{ text: 'You are a helpful geography teacher. Answer concisely.' }],
};

const response = await client.models.generateContent({
  model: 'gemini-3-flash',
  contents: [
    { role: 'user', parts: [{ text: 'What is the capital of France?' }] },
  ],
  config: {
    temperature: 0.7,
    maxOutputTokens: 1024,
    systemInstruction,
  },
});
# System-instruction example: steer the model's persona without adding
# the prompt to the conversation history itself.
system_instruction = {
    "role": "user",
    "parts": [{"text": "You are a helpful geography teacher. Answer concisely."}],
}

response = client.models.generate_content(
    model="gemini-3-flash",
    contents=[
        {"role": "user", "parts": [{"text": "What is the capital of France?"}]},
    ],
    config={
        "temperature": 0.7,
        "max_output_tokens": 1024,
        "system_instruction": system_instruction,
    },
)
📝 多轮对话
{
"contents": [
{
"role": "user",
"parts": [{ "text": "Hi, my name is Alice." }]
},
{
"role": "model",
"parts": [{ "text": "Hello Alice! Nice to meet you." }]
},
{
"role": "user",
"parts": [{ "text": "What's my name?" }]
}
]
}
如需使用 OpenAI 兼容 API,请参阅 Chat Completions。