Skip to content

API调用示例

本教程提供丰富的 OpenAI API 调用示例,帮助你快速上手开发。

基础调用

GPT-3.5 文本对话

python
from openai import OpenAI

# Hard-coded key for illustration; prefer an environment variable in practice.
client = OpenAI(api_key="your-api-key")

# The system message sets the persona; the user message asks the question.
conversation = [
    {"role": "system", "content": "你是一位专业的Python讲师"},
    {"role": "user", "content": "请解释什么是装饰器,以及如何使用它"},
]

completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=conversation,
    temperature=0.7,  # moderate creativity
    max_tokens=1000,  # cap the length of the reply
)

print(completion.choices[0].message.content)

GPT-4 高级对话

javascript
import OpenAI from 'openai';

// Reads the API key from the OPENAI_API_KEY environment variable.
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// System prompt fixes the persona; the user message carries the question.
const messages = [
  { role: "system", content: "你是一位资深架构师,擅长系统设计和性能优化" },
  { role: "user", content: "如何设计一个日活千万级用户的即时通讯系统?" },
];

const response = await client.chat.completions.create({
  model: "gpt-4",
  messages,
  temperature: 0.5, // lower temperature for a more focused answer
  max_tokens: 2000,
});

console.log(response.choices[0].message.content);

流式输出

对于长文本生成,流式输出可以提供更好的用户体验:

python
from openai import OpenAI
import os

# Read the API key from the environment instead of hard-coding it.
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "写一首关于人工智能的诗"}],
    stream=True,
)

# Print each token fragment immediately as it arrives.
for chunk in stream:
    if delta := chunk.choices[0].delta.content:
        print(delta, end="", flush=True)
javascript
import OpenAI from 'openai';

// API key is picked up from the OPENAI_API_KEY environment variable.
const client = new OpenAI();

const stream = await client.chat.completions.create({
  model: "gpt-3.5-turbo",
  messages: [{ role: "user", content: "用Python实现一个快速排序" }],
  stream: true,
});

// Write each delta to stdout as soon as it arrives (no trailing newline).
for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta?.content;
  if (delta) process.stdout.write(delta);
}

上下文对话(多轮)

简单上下文管理

python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

# Conversation history; the system message pins the assistant persona.
history = [
    {"role": "system", "content": "你是一位友好的助手"}
]

# Loop until the user types an exit command (case-insensitive).
while (user_input := input("你: ")).lower() not in ("exit", "quit", "q"):
    history.append({"role": "user", "content": user_input})

    reply = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=history,
    ).choices[0].message.content

    print(f"助手: {reply}")

    # Record the assistant turn so the next request carries full context.
    history.append({"role": "assistant", "content": reply})

自动摘要旧消息(节省 token)

python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

MAX_TOKENS = 3000  # leave headroom for new messages

def summarize_if_needed(messages):
    """Compress old chat history into a summary when it grows too large.

    Args:
        messages: chat history; ``messages[0]`` is assumed to be the
            system prompt and is always preserved as-is.

    Returns:
        The original list unchanged when under the token budget, otherwise
        a two-message list: the system prompt plus a system message
        containing a model-generated summary of the rest of the history.
    """
    # Rough token estimate: ~1.3 tokens per whitespace-separated word.
    # BUG FIX: the original called .split() on the message dict itself
    # (AttributeError); count the words of each message's *content*.
    total_tokens = sum(len(m["content"].split()) * 1.3 for m in messages)

    if total_tokens > MAX_TOKENS:
        # Summarize everything after the system prompt — the original only
        # passed messages[1], silently dropping the rest of the history.
        history_text = "\n".join(
            f'{m["role"]}: {m["content"]}' for m in messages[1:]
        )
        summary_response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": "请用简洁的语言总结以下对话的要点:"},
                {"role": "user", "content": history_text},
            ]
        )
        summary = summary_response.choices[0].message.content
        return [
            messages[0],  # keep the system prompt
            {"role": "system", "content": f"之前的对话摘要:{summary}"},
        ]
    return messages

函数调用(Function Calling)

支持函数调用的 GPT 模型(如 gpt-3.5-turbo-1106、gpt-4)可以调用你定义的函数,实现更强大的功能:

python
from openai import OpenAI
import json

client = OpenAI(api_key="your-api-key")

# JSON-schema description of the single tool exposed to the model.
TOOLS = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "获取指定城市的天气信息",
            "parameters": {
                "type": "object",
                "properties": {
                    "city": {
                        "type": "string",
                        "description": "城市名称,如:北京、上海",
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "温度单位",
                    },
                },
                "required": ["city"],
            },
        },
    }
]

messages = [
    {"role": "user", "content": "北京今天天气怎么样?需要穿什么衣服?"}
]

first_response = client.chat.completions.create(
    model="gpt-3.5-turbo-1106",
    messages=messages,
    tools=TOOLS,
    tool_choice="auto",  # let the model decide whether to call the tool
)

assistant_message = first_response.choices[0].message

# If the model asked for a tool call, run it and feed the result back.
if assistant_message.tool_calls:
    call = assistant_message.tool_calls[0]
    fn_name = call.function.name
    fn_args = json.loads(call.function.arguments)

    print(f"需要调用函数: {fn_name}")
    print(f"参数: {fn_args}")

    if fn_name == "get_weather":
        # Simulated tool result; a real app would query a weather service.
        weather_result = f"北京今天晴,25°C,适合穿短袖"
        messages.append(assistant_message)
        messages.append({
            "role": "tool",
            "tool_call_id": call.id,
            "content": weather_result,
        })

        # Second round-trip: the model turns the tool output into a reply.
        final_response = client.chat.completions.create(
            model="gpt-3.5-turbo-1106",
            messages=messages,
        )
        print(f"最终回复: {final_response.choices[0].message.content}")

文本嵌入(Embeddings)

python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

# Embed a single sentence.
result = client.embeddings.create(
    model="text-embedding-3-small",
    input="人工智能将改变未来的工作方式",
)

vector = result.data[0].embedding
print(f"嵌入向量维度: {len(vector)}")
print(f"前5个值: {vector[:5]}")

语义搜索示例

python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

documents = [
    "Python是一种高级编程语言,适合初学者学习",
    "JavaScript主要用于网页前端开发",
    "机器学习是人工智能的一个分支",
    "深度学习使用神经网络模拟人脑工作",
    "React是一个用于构建用户界面的JavaScript库"
]

# One embedding per document, kept in the same order as `documents`.
doc_embeddings = [
    client.embeddings.create(
        model="text-embedding-3-small",
        input=doc,
    ).data[0].embedding
    for doc in documents
]

def cosine_similarity(a, b):
    """Return the cosine similarity of two equal-length numeric vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    norm_a = sum(x * x for x in a) ** 0.5
    norm_b = sum(x * x for x in b) ** 0.5
    return dot / (norm_a * norm_b)

query = "我想学网页开发"
query_embedding = client.embeddings.create(
    model="text-embedding-3-small",
    input=query,
).data[0].embedding

# Score every document against the query.
scored = [
    (idx, cosine_similarity(query_embedding, emb), documents[idx])
    for idx, emb in enumerate(doc_embeddings)
]

# Highest similarity first; show the top three matches.
scored.sort(key=lambda item: item[1], reverse=True)
print(f"查询: {query}\n最相关的结果:")
for _idx, sim, doc in scored[:3]:
    print(f"  [{sim:.3f}] {doc}")

图像生成(DALL-E API)

python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")

# Request a single 1024x1024 image from DALL-E 3.
generation = client.images.generate(
    model="dall-e-3",
    prompt="一个可爱的机器人正在咖啡馆里写代码,赛博朋克风格",
    size="1024x1024",
    quality="standard",
    n=1,
)

url = generation.data[0].url
print(f"生成的图片: {url}")

错误处理

python
from openai import OpenAI
from openai import RateLimitError, APIError
import time

client = OpenAI(api_key="your-api-key")

def call_with_retry(messages, max_retries=3):
    """Call the chat completions API, retrying rate limits with backoff.

    Args:
        messages: chat messages to send.
        max_retries: maximum number of attempts before giving up.

    Returns:
        The API response object on success.

    Raises:
        APIError: re-raised if the final attempt fails with an API error.
        Exception: when every attempt hit the rate limit.
    """
    for attempt in range(max_retries):
        try:
            return client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=messages
            )

        except RateLimitError:
            print(f"触达速率限制,等待后重试... ({attempt + 1}/{max_retries})")
            # Only back off when another attempt will follow — the original
            # slept pointlessly before the final give-up.  (`import time`
            # was also re-executed inside this handler on every retry.)
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # exponential backoff

        except APIError as e:
            print(f"API错误: {e}")
            if attempt == max_retries - 1:
                raise

    raise Exception("达到最大重试次数")

# Usage
response = call_with_retry([
    {"role": "user", "content": "你好"}
])
print(response.choices[0].message.content)

常用参数说明

| 参数 | 说明 | 推荐值 |
| --- | --- | --- |
| model | 使用的模型 | gpt-3.5-turbo / gpt-4 |
| temperature | 创造性(0-2) | 0.7(日常)、0.2(精确) |
| max_tokens | 最大输出 token 数 | 根据需求设置 |
| top_p | 核采样 | 通常与 temperature 二选一 |
| frequency_penalty | 频率惩罚 | -2.0 到 2.0 |
| presence_penalty | 存在惩罚 | -2.0 到 2.0 |

下一步

本站仅供学习交流,请勿用于商业用途