cURL
curl --request POST \
  --url https://api.k-router.com/v1beta/models/{model}:generateContent \
  --header 'Content-Type: application/json' \
  --header 'x-goog-api-key: <x-goog-api-key>' \
  --data '{
    "contents": [ {} ],
    "generationConfig": {},
    "systemInstruction": {}
  }'
Google Gemini generateContent API 호환 엔드포인트
POST /v1beta/models/{model}:generateContent
POST /v1beta/models/{model}:streamGenerateContent
kr-your-api-key
?key=kr-your-api-key
role
user
model
parts
temperature
topP
maxOutputTokens
stopSequences
{
  "candidates": [
    {
      "content": {
        "parts": [
          { "text": "안녕하세요! 무엇을 도와드릴까요?" }
        ],
        "role": "model"
      },
      "finishReason": "STOP"
    }
  ],
  "usageMetadata": {
    "promptTokenCount": 10,
    "candidatesTokenCount": 15,
    "totalTokenCount": 25
  }
}
from google import genai

client = genai.Client(
    api_key="kr-your-api-key",
    http_options=genai.types.HttpOptions(
        api_version="v1beta",
        base_url="https://api.k-router.com"
    )
)

response = client.models.generate_content(
    model="kr/gemini-25-pro",
    contents="안녕하세요"
)
print(response.text)
# generateContent
curl -X POST "https://api.k-router.com/v1beta/models/kr%2Fgemini-25-pro:generateContent?key=kr-your-api-key" \
  -H "Content-Type: application/json" \
  -d '{
    "contents": [
      { "role": "user", "parts": [{"text": "안녕하세요"}] }
    ],
    "generationConfig": {
      "temperature": 0.7,
      "maxOutputTokens": 1024
    }
  }'
# streamGenerateContent
curl -X POST "https://api.k-router.com/v1beta/models/kr%2Fgemini-25-pro:streamGenerateContent?key=kr-your-api-key" \
  -H "Content-Type: application/json" \
  -d '{
    "contents": [
      { "role": "user", "parts": [{"text": "안녕하세요"}] }
    ]
  }'
from openai import OpenAI

client = OpenAI(api_key="kr-your-api-key", base_url="https://api.k-router.com/v1")

response = client.chat.completions.create(
    model="kr/gemini-25-pro",
    messages=[{"role": "user", "content": "안녕하세요"}]
)