import openai
import langsmith as ls
from langsmith.wrappers import wrap_openai
# Plain OpenAI client; wrapped later with wrap_openai for traced calls.
client = openai.Client()
# Conversation payload reused by every example call below.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"}
]
# Metadata and tags can be set **statically** when decorating a function,
# using the @traceable decorator with tags and metadata.
# Ensure the LANGSMITH_TRACING environment variable is set for
# @traceable to take effect.
@ls.traceable(
    run_type="llm",
    name="OpenAI Call Decorator",
    tags=["my-tag"],
    metadata={"my-key": "my-value"},
)
def call_openai(
    messages: list[dict], model: str = "gpt-4.1-mini"
) -> str:
    """Call the OpenAI chat-completions API inside a traced run.

    Args:
        messages: Chat messages in OpenAI ``{"role": ..., "content": ...}`` form.
        model: Model name to request the completion from.

    Returns:
        The assistant reply text (first choice's message content).
    """
    # You can also set metadata/tags dynamically on the parent run.
    # get_current_run_tree() returns None when tracing is disabled
    # (e.g. LANGSMITH_TRACING unset), so guard to avoid an AttributeError.
    rt = ls.get_current_run_tree()
    if rt is not None:
        rt.metadata["some-conditional-key"] = "some-val"
        rt.tags.extend(["another-tag"])
    return client.chat.completions.create(
        model=model,
        messages=messages,
    ).choices[0].message.content
call_openai(
    messages,
    # To add tags/metadata at **call time**, pass them via the
    # langsmith_extra parameter when invoking the function.
    langsmith_extra={"tags": ["my-other-tag"], "metadata": {"my-other-key": "my-value"}}
)
# Alternatively, you can dynamically set default run metadata within a
# given scope. tracing_context does not itself create a span, but
# initializes the context for child spans created afterwards.
with ls.tracing_context(metadata={"default-key": "default-value"}):
    call_openai(messages)
# Or you can use the trace context manager, which creates a new span
# with the specified metadata and tags.
with ls.trace(
    name="OpenAI Call Trace",
    run_type="llm",
    inputs={"messages": messages},
    tags=["my-tag"],
    metadata={"my-key": "my-value"},
) as rt:
    chat_completion = client.chat.completions.create(
        model="gpt-4.1-mini",
        messages=messages,
    )
    # Metadata can also be added dynamically while the span is open.
    rt.metadata["some-conditional-key"] = "some-val"
    # Explicitly end the span, recording the completion as its output.
    rt.end(outputs={"output": chat_completion})
# You can use the same techniques on a wrapped client.
patched_client = wrap_openai(
    client, tracing_extra={"metadata": {"my-key": "my-value"}, "tags": ["a-tag"]}
)
chat_completion = patched_client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=messages,
    # Per-call tags/metadata, combined with the tracing_extra set above.
    langsmith_extra={
        "tags": ["my-other-tag"],
        "metadata": {"my-other-key": "my-value"},
    },
)
# LangSmith deployments: to add per-call metadata dynamically in an Agent
# Server deployment, we recommend using tracing_context inside the factory
# function. See "Customize tracing in a deployed agent" for an example.

