Gemini API Examples
使用 OpenAI SDK 访问 Gemini 模型的示例。
基础配置
在开始使用API之前,请确保您已经获取了API Key。如果还没有,请参考创建API Key。
💡 地址说明
- OpenAI SDK 方式(推荐): 使用 https://api.agentsflare.com/v1
- Request 方式: 可使用新地址 https://api.agentsflare.com/google/v1/models/(支持429自动切换通道)
基础信息
- API Base URL (OpenAI SDK): https://api.agentsflare.com/v1
- 认证方式: Bearer Token
- 内容类型: application/json
请求示例
bash
# Basic (non-streaming) chat completion via the OpenAI-compatible endpoint.
curl -X POST "https://api.agentsflare.com/v1/chat/completions" \
  -H "Authorization: Bearer YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gemini-2.5-flash",
    "messages": [
      {
        "role": "user",
        "content": "Hello, how are you?"
      }
    ],
    "max_tokens": 100,
    "temperature": 0.7
  }'
python
# Minimal chat completion using the OpenAI SDK pointed at the
# Gemini-compatible endpoint.
from openai import OpenAI

BASE_URL = "https://api.agentsflare.com/v1"

client = OpenAI(
    base_url=BASE_URL,
    api_key="YOUR_API_KEY",  # replace with your key (prefer an env var)
)

completion = client.chat.completions.create(
    model="gemini-2.5-flash",
    messages=[
        # "system" is the portable role name; the newer OpenAI-only
        # "developer" role may not be recognized by the Gemini compat layer.
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)

print(completion.choices[0].message)
javascript
// ESM example: OpenAI SDK pointed at the Gemini-compatible endpoint.
import OpenAI from "openai";

const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/v1"
});

async function main() {
  try {
    const res = await client.chat.completions.create({
      model: "gemini-2.5-flash",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });
    // Print just the reply text...
    console.log(res.choices?.[0]?.message?.content);
    // ...or the full response:
    // console.log(res);
  } catch (err) {
    // The OpenAI SDK error object usually carries a more detailed response.
    console.error(err?.response?.data ?? err);
  }
}

main();
java
// Java example using the official OpenAI Java SDK against the
// Gemini-compatible endpoint.
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.chat.completions.ChatCompletionCreateParams;
import com.openai.models.chat.completions.ChatCompletion;

public class Main {
    public static void main(String[] args) {
        // Require the key via environment variable; never hard-code secrets.
        String apiKey = System.getenv("AGENTSFLARE_API_KEY");
        if (apiKey == null || apiKey.isBlank()) {
            throw new IllegalStateException("Missing AGENTSFLARE_API_KEY env var");
        }

        // Point the SDK at the agentsflare base URL.
        OpenAIClient client = OpenAIOkHttpClient.builder()
                .apiKey(apiKey)
                .baseUrl("https://api.agentsflare.com/v1/")
                .build();

        ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
                .model("gemini-2.5-flash")
                .addMessage(ChatCompletionCreateParams.Message.builder()
                        .role(ChatCompletionCreateParams.Message.Role.USER)
                        .content("Hello, how are you?")
                        .build())
                .maxTokens(100)
                .temperature(0.7)
                .build();

        ChatCompletion res = client.chat().completions().create(params);
        String content = res.choices().get(0).message().content();
        System.out.println(content);
    }
}
go
package main
import (
"context"
"fmt"
"log"
"os"
openai "github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
apiKey := os.Getenv("AGENTSFLARE_API_KEY") // 建议用环境变量
if apiKey == "" {
log.Fatal("missing env AGENTSFLARE_API_KEY")
}
client := openai.NewClient(
option.WithAPIKey(apiKey),
// 关键:把 SDK 的 base url 指向 agentsflare
option.WithBaseURL("https://api.agentsflare.com/v1/"),
)
ctx := context.Background()
resp, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
Model: openai.F("gemini-2.5-flash"),
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Hello, how are you?"),
}),
MaxTokens: openai.F(int64(100)),
Temperature: openai.F(0.7),
})
if err != nil {
log.Fatalf("chat completion failed: %v", err)
}
// 打印回复文本
if len(resp.Choices) > 0 && resp.Choices[0].Message.Content != "" {
fmt.Println(resp.Choices[0].Message.Content)
} else {
fmt.Printf("empty response: %+v\n", resp)
}
}javascript
// CommonJS example: OpenAI SDK pointed at the Gemini-compatible endpoint.
const { OpenAI } = require("openai");

const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/v1"
});

async function main() {
  try {
    const res = await client.chat.completions.create({
      model: "gemini-2.5-flash",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });
    // Print just the reply text...
    console.log(res.choices?.[0]?.message?.content);
    // ...or the full response:
    // console.log(res);
  } catch (err) {
    // The OpenAI SDK error object usually carries a more detailed response.
    console.error(err?.response?.data ?? err);
  }
}

main();
响应示例
保留转义
{
"id": "portkey-183db1aa-1e64-4478-b4f0-5ac88479373d",
"object": "chat.completion",
"created": 1768129653,
"model": "gemini-2.5-flash",
"provider": "google",
"choices": [
{
"message": {
"role": "assistant",
"content": "Hello! I'"
},
"index": 0,
"finish_reason": "length"
}
],
"usage": {
"prompt_tokens": 7,
"completion_tokens": 4,
"total_tokens": 103,
"completion_tokens_details": {
"reasoning_tokens": 92
}
}
}
Request 方式流式请求
使用 Google Generative AI API 格式进行流式请求,支持 SSE(Server-Sent Events)。
🚀 新地址优势
使用 https://api.agentsflare.com/google/v1/models/ 支持 429 自动切换通道,提供更稳定的服务。
请求示例
bash
# Streaming request (SSE) in Google Generative AI format.
# NOTE: the Authorization header must use double quotes so the shell
# expands $API_KEY; single quotes would send the literal string "$API_KEY".
curl --location --request POST 'https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse' \
--header 'content-type: application/json' \
--header "Authorization: Bearer $API_KEY" \
--data-raw '{
  "contents": [{
    "role": "user",
    "parts": [
      {"text": "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"}
    ]
  }]
}'
python
# Stream a Google Generative AI SSE response and print text as it arrives.
import requests
import json

API_KEY = "YOUR_API_KEY"

url = "https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse"
headers = {
    "content-type": "application/json",
    "Authorization": f"Bearer {API_KEY}",
}
data = {
    "contents": [{
        "role": "user",
        "parts": [
            {"text": "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"}
        ]
    }]
}

# stream=True keeps the connection open so SSE lines can be consumed lazily.
response = requests.post(url, headers=headers, json=data, stream=True)
response.raise_for_status()  # fail fast on HTTP errors instead of parsing an error body

for line in response.iter_lines():
    if not line:
        continue
    line_str = line.decode("utf-8")
    if not line_str.startswith("data: "):
        continue
    try:
        json_data = json.loads(line_str[6:])  # strip the "data: " prefix
    except json.JSONDecodeError:
        continue  # skip keep-alives / non-JSON lines
    # Each event may carry several candidates, each with several text parts.
    for candidate in json_data.get("candidates", []):
        for part in candidate.get("content", {}).get("parts", []):
            if "text" in part:
                print(part["text"], end="", flush=True)
javascript
// Stream a Google Generative AI SSE response with axios and print text chunks.
const axios = require('axios');

const API_KEY = 'YOUR_API_KEY';
const url = 'https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse';

const headers = {
  'content-type': 'application/json',
  'Authorization': `Bearer ${API_KEY}`
};

const data = {
  contents: [{
    role: 'user',
    parts: [
      { text: '小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案' }
    ]
  }]
};

// responseType: 'stream' lets us consume the SSE body chunk by chunk.
axios.post(url, data, {
  headers,
  responseType: 'stream'
}).then(response => {
  response.data.on('data', (chunk) => {
    // NOTE(review): this assumes each "data: " JSON line fits in one chunk;
    // a production client should buffer partial lines across chunks.
    const lines = chunk.toString().split('\n');
    lines.forEach(line => {
      if (!line.startsWith('data: ')) return;
      try {
        const jsonData = JSON.parse(line.slice(6));
        (jsonData.candidates ?? []).forEach(candidate => {
          (candidate.content?.parts ?? []).forEach(part => {
            if (part.text) {
              process.stdout.write(part.text);
            }
          });
        });
      } catch (e) {
        // Ignore non-JSON lines (e.g. keep-alives).
      }
    });
  });
}).catch(error => {
  console.error('Error:', error.message);
});
go
package main
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
)
func main() {
apiKey := "YOUR_API_KEY"
url := "https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse"
payload := map[string]interface{}{
"contents": []map[string]interface{}{
{
"role": "user",
"parts": []map[string]string{
{"text": "小明的爸爸有三个儿子,老大叫大毛,老二叫二毛,老三叫什么名字?请做详细推理,但仅简明扼要的告诉我答案"},
},
},
},
}
jsonData, _ := json.Marshal(payload)
req, _ := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
req.Header.Set("content-type", "application/json")
req.Header.Set("Authorization", "Bearer "+apiKey)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error:", err)
return
}
defer resp.Body.Close()
reader := bufio.NewReader(resp.Body)
for {
line, err := reader.ReadString('\n')
if err != nil {
if err == io.EOF {
break
}
fmt.Println("Error reading stream:", err)
return
}
if strings.HasPrefix(line, "data: ") {
jsonStr := strings.TrimPrefix(line, "data: ")
var result map[string]interface{}
if err := json.Unmarshal([]byte(jsonStr), &result); err == nil {
if candidates, ok := result["candidates"].([]interface{}); ok {
for _, candidate := range candidates {
if c, ok := candidate.(map[string]interface{}); ok {
if content, ok := c["content"].(map[string]interface{}); ok {
if parts, ok := content["parts"].([]interface{}); ok {
for _, part := range parts {
if p, ok := part.(map[string]interface{}); ok {
if text, ok := p["text"].(string); ok {
fmt.Print(text)
}
}
}
}
}
}
}
}
}
}
}
}响应示例
流式响应采用 SSE 格式,每行以 data: 开头:
data: {"candidates":[{"content":{"parts":[{"text":"让"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":37,"candidatesTokenCount":1,"totalTokenCount":38}}
data: {"candidates":[{"content":{"parts":[{"text":"我"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":37,"candidatesTokenCount":2,"totalTokenCount":39}}
data: {"candidates":[{"content":{"parts":[{"text":"来"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":37,"candidatesTokenCount":3,"totalTokenCount":40}}

完整响应包含以下字段:
- candidates: 候选响应列表
  - content.parts[].text: 生成的文本内容
  - finishReason: 完成原因(如 "STOP")
- usageMetadata: Token 使用统计
  - promptTokenCount: 提示词 Token 数
  - candidatesTokenCount: 生成内容 Token 数
  - totalTokenCount: 总 Token 数