Gemini API Examples
Examples of using OpenAI SDK to access Gemini models.
Basic Configuration
Before starting to use the API, please ensure you have obtained an API Key. If not, please refer to Create API Key.
💡 Address Instructions
- OpenAI SDK Method (Recommended): use https://api.agentsflare.com/v1
- Native Request Method: use the new address https://api.agentsflare.com/google/v1/models/ (supports automatic channel switching on 429 errors)
Basic Information
- API Base URL (OpenAI SDK): https://api.agentsflare.com/v1
- Authentication Method: Bearer Token
- Content Type:
application/json
Request Examples
bash
# Basic non-streaming chat completion against the OpenAI-compatible endpoint.
# Replace YOUR_API_KEY with the key created in the console.
curl -X POST "https://api.agentsflare.com/v1/chat/completions" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"model": "gemini-2.5-flash",
"messages": [
{
"role": "user",
"content": "Hello, how are you?"
}
],
"max_tokens": 100,
"temperature": 0.7
}'
python
from openai import OpenAI

# Point the official OpenAI SDK at the agentsflare OpenAI-compatible endpoint.
url = "https://api.agentsflare.com/v1"

client = OpenAI(
    base_url=url,
    api_key="YOUR_API_KEY",  # prefer reading this from an environment variable
)

# Fix: the Gemini OpenAI-compatibility layer expects the "system" role for
# instructions; the OpenAI-only "developer" role may be rejected by the proxy.
completion = client.chat.completions.create(
    model="gemini-2.5-flash",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ],
)
print(completion.choices[0].message)
javascript
import OpenAI from "openai";

// Point the official OpenAI SDK at the agentsflare-compatible endpoint.
const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/v1"
});

async function main() {
  try {
    const res = await client.chat.completions.create({
      model: "gemini-2.5-flash",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });
    // Just the text of the first choice:
    console.log(res.choices?.[0]?.message?.content);
    // Or print the full response:
    // console.log(res);
  } catch (err) {
    // Fix: the openai v4+ SDK throws APIError with `status`/`error` fields;
    // `err.response.data` is an axios convention and is undefined here, so the
    // original logged no useful detail.
    if (err instanceof OpenAI.APIError) {
      console.error(err.status, err.name, err.message);
    } else {
      console.error(err);
    }
  }
}
main();
java
import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.chat.completions.ChatCompletionCreateParams;
import com.openai.models.chat.completions.ChatCompletion;
public class Main {
// Entry point: sends one non-streaming chat completion request and prints the reply.
public static void main(String[] args) {
// Read the key from the environment rather than hard-coding it.
String apiKey = System.getenv("AGENTSFLARE_API_KEY");
if (apiKey == null || apiKey.isBlank()) {
throw new IllegalStateException("Missing AGENTSFLARE_API_KEY env var");
}
OpenAIClient client = OpenAIOkHttpClient.builder()
.apiKey(apiKey)
// Trailing slash matters when the client resolves relative request paths.
.baseUrl("https://api.agentsflare.com/v1/")
.build();
// NOTE(review): builder names (Message.builder(), maxTokens) vary between
// openai-java releases — confirm against the SDK version in your build file.
ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
.model("gemini-2.5-flash")
.addMessage(ChatCompletionCreateParams.Message.builder()
.role(ChatCompletionCreateParams.Message.Role.USER)
.content("Hello, how are you?")
.build())
.maxTokens(100)
.temperature(0.7)
.build();
// Blocking call; the SDK throws on non-2xx responses.
ChatCompletion res = client.chat().completions().create(params);
String content = res.choices().get(0).message().content();
System.out.println(content);
}
}
go
// Basic non-streaming chat completion using the openai-go SDK pointed at the
// agentsflare OpenAI-compatible endpoint.
package main
import (
"context"
"fmt"
"log"
"os"
openai "github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func main() {
apiKey := os.Getenv("AGENTSFLARE_API_KEY") // It is recommended to use environment variables
if apiKey == "" {
log.Fatal("missing env AGENTSFLARE_API_KEY")
}
client := openai.NewClient(
option.WithAPIKey(apiKey),
// Key: point the SDK's base url to agentsflare
option.WithBaseURL("https://api.agentsflare.com/v1/"),
)
ctx := context.Background()
// NOTE(review): the openai.F(...) wrappers are the pre-v1 (v0.x) openai-go
// API; v1+ uses plain struct fields — confirm against the version in go.mod.
resp, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
Model: openai.F("gemini-2.5-flash"),
Messages: openai.F([]openai.ChatCompletionMessageParamUnion{
openai.UserMessage("Hello, how are you?"),
}),
MaxTokens: openai.F(int64(100)),
Temperature: openai.F(0.7),
})
if err != nil {
log.Fatalf("chat completion failed: %v", err)
}
// Print the reply text
if len(resp.Choices) > 0 && resp.Choices[0].Message.Content != "" {
fmt.Println(resp.Choices[0].Message.Content)
} else {
fmt.Printf("empty response: %+v\n", resp)
}
}
javascript
const { OpenAI } = require("openai");

// Point the official OpenAI SDK at the agentsflare-compatible endpoint.
const client = new OpenAI({
  apiKey: process.env.AGENTSFLARE_API_KEY,
  baseURL: "https://api.agentsflare.com/v1"
});

async function main() {
  try {
    const res = await client.chat.completions.create({
      model: "gemini-2.5-flash",
      messages: [{ role: "user", content: "Hello, how are you?" }],
      max_tokens: 100,
      temperature: 0.7
    });
    // Just the text of the first choice:
    console.log(res.choices?.[0]?.message?.content);
    // Or print the full response:
    // console.log(res);
  } catch (err) {
    // Fix: the openai v4+ SDK throws APIError with `status`/`error` fields;
    // `err.response.data` is an axios convention and is undefined here, so the
    // original logged no useful detail.
    if (err instanceof OpenAI.APIError) {
      console.error(err.status, err.name, err.message);
    } else {
      console.error(err);
    }
  }
}
main();
Response Example
{
"id": "portkey-183db1aa-1e64-4478-b4f0-5ac88479373d",
"object": "chat.completion",
"created": 1768129653,
"model": "gemini-2.5-flash",
"provider": "google",
"choices": [
{
"message": {
"role": "assistant",
"content": "Hello! I'"
},
"index": 0,
"finish_reason": "length"
}
],
"usage": {
"prompt_tokens": 7,
"completion_tokens": 4,
"total_tokens": 103,
"completion_tokens_details": {
"reasoning_tokens": 92
}
}
}
Request Method Streaming
Use Google Generative AI API format for streaming requests with SSE (Server-Sent Events) support.
🚀 New Address Benefits
Using https://api.agentsflare.com/google/v1/models/ supports automatic channel switching on 429 errors, providing more stable service.
Request Examples
bash
# Streaming (SSE) request in native Gemini format.
# Fix: the Authorization header must use double quotes so the shell expands
# $API_KEY — inside single quotes the literal string "$API_KEY" is sent.
curl --location --request POST 'https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse' \
--header 'content-type: application/json' \
--header "Authorization: Bearer $API_KEY" \
--data-raw '{
"contents": [{
"role": "user",
"parts": [
{"text": "Xiao Ming'\''s father has three sons. The eldest is called Da Mao, the second is called Er Mao, what is the third one called? Please reason in detail but give me only a concise answer"}
]
}]
}'
python
import requests
import json

API_KEY = "YOUR_API_KEY"
url = "https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse"

headers = {
    "content-type": "application/json",
    "Authorization": f"Bearer {API_KEY}",
}
data = {
    "contents": [{
        "role": "user",
        "parts": [
            {"text": "Xiao Ming's father has three sons. The eldest is called Da Mao, the second is called Er Mao, what is the third one called? Please reason in detail but give me only a concise answer"}
        ]
    }]
}

response = requests.post(url, headers=headers, json=data, stream=True)
# Fix: fail fast on HTTP errors instead of silently iterating an error body
# that will never contain valid SSE data.
response.raise_for_status()

# SSE payload lines look like: "data: {...json...}".
for line in response.iter_lines():
    if not line:
        continue
    line_str = line.decode('utf-8')
    if not line_str.startswith('data: '):
        continue
    try:
        json_data = json.loads(line_str[6:])
    except json.JSONDecodeError:
        # Skip keep-alive / non-JSON SSE lines.
        continue
    # Print every text part of every candidate as it streams in.
    for candidate in json_data.get('candidates', []):
        for part in candidate.get('content', {}).get('parts', []):
            if 'text' in part:
                print(part['text'], end='', flush=True)
const axios = require('axios');

const API_KEY = 'YOUR_API_KEY';
const url = 'https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse';
const headers = {
  'content-type': 'application/json',
  'Authorization': `Bearer ${API_KEY}`
};
const data = {
  contents: [{
    role: 'user',
    parts: [
      { text: "Xiao Ming's father has three sons. The eldest is called Da Mao, the second is called Er Mao, what is the third one called? Please reason in detail but give me only a concise answer" }
    ]
  }]
};

axios.post(url, data, {
  headers,
  responseType: 'stream'
}).then(response => {
  // Fix: an SSE line can be split across network chunks, which made the
  // original JSON.parse fail and silently drop tokens. Buffer partial lines
  // between 'data' events and only parse complete lines.
  let buffer = '';
  response.data.on('data', (chunk) => {
    buffer += chunk.toString();
    const lines = buffer.split('\n');
    buffer = lines.pop(); // keep the (possibly incomplete) last line
    for (const line of lines) {
      if (!line.startsWith('data: ')) continue;
      try {
        const jsonData = JSON.parse(line.slice(6));
        for (const candidate of jsonData.candidates ?? []) {
          for (const part of candidate.content?.parts ?? []) {
            if (part.text) {
              process.stdout.write(part.text);
            }
          }
        }
      } catch (e) {
        // Ignore keep-alive / non-JSON lines
      }
    }
  });
}).catch(error => {
  console.error('Error:', error.message);
});
go
// Streams a Gemini streamGenerateContent response over SSE and prints each
// text fragment as it arrives.
package main

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	apiKey := "YOUR_API_KEY" // prefer os.Getenv in real code
	url := "https://api.agentsflare.com/google/v1/models/gemini-3-pro-preview:streamGenerateContent?alt=sse"

	payload := map[string]interface{}{
		"contents": []map[string]interface{}{
			{
				"role": "user",
				"parts": []map[string]string{
					{"text": "Xiao Ming's father has three sons. The eldest is called Da Mao, the second is called Er Mao, what is the third one called? Please reason in detail but give me only a concise answer"},
				},
			},
		},
	}

	// Fix: check errors instead of discarding them with `_`.
	jsonData, err := json.Marshal(payload)
	if err != nil {
		fmt.Println("Error encoding payload:", err)
		return
	}
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonData))
	if err != nil {
		fmt.Println("Error building request:", err)
		return
	}
	req.Header.Set("content-type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	defer resp.Body.Close()
	// Fix: surface HTTP errors instead of trying to parse an error body as SSE.
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		fmt.Printf("HTTP %d: %s\n", resp.StatusCode, body)
		return
	}

	// Read the stream line by line; SSE payload lines start with "data: ".
	reader := bufio.NewReader(resp.Body)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			fmt.Println("Error reading stream:", err)
			return
		}
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		var result map[string]interface{}
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &result); err != nil {
			continue // keep-alive / non-JSON line
		}
		// Guard-clause form of the original nested type assertions.
		candidates, ok := result["candidates"].([]interface{})
		if !ok {
			continue
		}
		for _, candidate := range candidates {
			c, ok := candidate.(map[string]interface{})
			if !ok {
				continue
			}
			content, ok := c["content"].(map[string]interface{})
			if !ok {
				continue
			}
			parts, ok := content["parts"].([]interface{})
			if !ok {
				continue
			}
			for _, part := range parts {
				if p, ok := part.(map[string]interface{}); ok {
					if text, ok := p["text"].(string); ok {
						fmt.Print(text)
					}
				}
			}
		}
	}
}
Response Example
Streaming responses use SSE format, with each line starting with data: :
data: {"candidates":[{"content":{"parts":[{"text":"Let"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":37,"candidatesTokenCount":1,"totalTokenCount":38}}
data: {"candidates":[{"content":{"parts":[{"text":" me"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":37,"candidatesTokenCount":2,"totalTokenCount":39}}
data: {"candidates":[{"content":{"parts":[{"text":" think"}],"role":"model"},"finishReason":"STOP","index":0}],"usageMetadata":{"promptTokenCount":37,"candidatesTokenCount":3,"totalTokenCount":40}}
The complete response includes the following fields:
- candidates: list of candidate responses
  - content.parts[].text: generated text content
  - finishReason: completion reason (e.g., "STOP")
- usageMetadata: token usage statistics
  - promptTokenCount: prompt token count
  - candidatesTokenCount: generated content token count
  - totalTokenCount: total token count
Request Parameters
For detailed parameters, see Gemini API Quickstart