# Build a (system, human) conversation and run the translation.
messages = [
    (
        "system",
        "You are a helpful assistant that translates English to French. Translate the user sentence.",
    ),
    ("human", "I love programming."),
]

ai_msg = llm.invoke(messages)
ai_msg
from langchain_openai import ChatOpenAI

# Target an Azure OpenAI resource through the OpenAI-compatible v1 endpoint.
llm = ChatOpenAI(
    model="gpt-5-mini",  # Your Azure deployment name
    base_url="https://{your-resource-name}.openai.azure.com/openai/v1/",
    api_key="your-azure-api-key",
)

response = llm.invoke("Hello, how are you?")
print(response.content)
将 Microsoft Entra ID 与 Azure OpenAI 配合使用
v1 API 添加了原生的 Microsoft Entra ID(前身为 Azure AD)身份验证支持,具有自动令牌刷新功能。将令牌提供者可调用对象传递给 api_key 参数:
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from langchain_openai import ChatOpenAI

# Create a token provider that handles automatic refresh
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(),
    "https://cognitiveservices.azure.com/.default",
)

llm = ChatOpenAI(
    model="gpt-5-mini",  # Your Azure deployment name
    base_url="https://{your-resource-name}.openai.azure.com/openai/v1/",
    api_key=token_provider,  # Callable that handles token refresh
)

# Use the model as normal
messages = [
    ("system", "You are a helpful assistant."),
    ("human", "Translate 'I love programming' to French."),
]
response = llm.invoke(messages)
print(response.content)
令牌提供者是一个可调用对象,它会自动检索和刷新身份验证令牌,无需手动管理令牌过期。
安装要求:要使用 Microsoft Entra ID 身份验证,请安装 Azure Identity 库:
from pydantic import BaseModel, Field


class GetWeather(BaseModel):
    """Get the current weather in a given location"""

    location: str = Field(description="The city and state, e.g. San Francisco, CA")


# The schema (docstring + field descriptions) is sent to the model as the tool spec.
llm_with_tools = llm.bind_tools([GetWeather])
# A question that should trigger a GetWeather tool call.
ai_msg = llm_with_tools.invoke("what is the weather like in San Francisco")
ai_msg
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

llm = ChatOpenAI(model="gpt-4.1")


class Movie(BaseModel):
    """A movie with details."""

    title: str = Field(description="The title of the movie")
    year: int = Field(description="The year the movie was released")
    director: str = Field(description="The director of the movie")
    rating: float = Field(description="The movie's rating out of 10")


# json_schema uses OpenAI's structured-output mode to guarantee the shape.
structured_llm = llm.with_structured_output(Movie, method="json_schema")

response = structured_llm.invoke("Provide details about the movie Inception")
response
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4.1-mini")

# Bind OpenAI's built-in web search tool.
tool = {"type": "web_search_preview"}
llm_with_tools = llm.bind_tools([tool])

response = llm_with_tools.invoke("What was a positive news story from today?")
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4.1-mini")

# Built-in image generation; "low" quality keeps latency and cost down.
tool = {"type": "image_generation", "quality": "low"}
llm_with_tools = llm.bind_tools([tool])

ai_message = llm_with_tools.invoke(
    "Draw a picture of a cute fuzzy cat with an umbrella"
)
import base64

from IPython.display import Image

# Find the first image block in the response and render it inline.
image = next(
    item
    for item in ai_message.content_blocks
    if item["type"] == "image"
)
Image(base64.b64decode(image["base64"]), width=200)
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4.1-mini",
    include=["file_search_call.results"],  # optionally include search results
)

openai_vector_store_ids = [
    "vs_...",  # your IDs here
]

# Search over documents already indexed in OpenAI vector stores.
tool = {
    "type": "file_search",
    "vector_store_ids": openai_vector_store_ids,
}
llm_with_tools = llm.bind_tools([tool])

response = llm_with_tools.invoke("What is deep research by OpenAI?")
print(response.text)
# defer_loading tools are only surfaced to the model when tool_search finds them.
@tool(extras={"defer_loading": True})
def get_weather(location: str) -> str:
    """Get the current weather for a location."""
    return f"The weather in {location} is sunny and 72°F"


@tool(extras={"defer_loading": True})
def get_recipe(query: str) -> None:
    """Get a recipe for chicken soup."""


model = ChatOpenAI(model="gpt-5.4", use_responses_api=True)
agent = create_agent(
    model=model,
    tools=[
        get_weather,
        get_recipe,
        {"type": "tool_search"},
    ],
)

input_message = {"role": "user", "content": "What's the weather in San Francisco?"}
result = agent.invoke({"messages": [input_message]})

for message in result["messages"]:
    message.pretty_print()
================================ Human Message =================================What's the weather in San Francisco?================================== Ai Message ==================================[ { "id": "tsc_0667642bae2ae6c70069ad6cb31f0c819c838b18b0e1cf1279", "arguments": { "paths": [ "get_weather" ] }, "execution": "server", "status": "completed", "type": "tool_search_call" }, { "id": "tso_0667642bae2ae6c70069ad6cb339dc819c9bbc05cb432f347e", "execution": "server", "status": "completed", "tools": [ { "name": "get_weather", "parameters": { "properties": { "location": { "type": "string" } }, "required": [ "location" ], "type": "object", "additionalProperties": false }, "strict": true, "type": "function", "defer_loading": true, "description": "Get the current weather for a location." } ], "type": "tool_search_output" }, { "arguments": "{\"location\":\"San Francisco\"}", "call_id": "call_nwy9NDI24fTe8qESIRqZGtYm", "name": "get_weather", "type": "function_call", "id": "fc_0667642bae2ae6c70069ad6cb37adc819cbc55cde85e111e32", "namespace": "get_weather", "status": "completed" }]Tool Calls: get_weather (call_nwy9NDI24fTe8qESIRqZGtYm) Call ID: call_nwy9NDI24fTe8qESIRqZGtYm Args: location: San Francisco================================= Tool Message =================================Name: get_weatherThe weather in San Francisco is sunny and 72°F================================== Ai Message ==================================[ { "type": "text", "text": "It\u2019s currently sunny and 72\u00b0F in San Francisco.", "annotations": [], "id": "msg_0667642bae2ae6c70069ad6cb4829c819c8e26bc7ccc68dcd7" }]
import base64


def load_png_as_base64(file_path):
    """Read a file and return its bytes as a base64-encoded UTF-8 string."""
    with open(file_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("utf-8")


screenshot_1_base64 = load_png_as_base64(
    "/path/to/screenshot_1.png"
)  # perhaps a screenshot of an application
screenshot_2_base64 = load_png_as_base64(
    "/path/to/screenshot_2.png"
)  # perhaps a screenshot of the Desktop
from langchain_openai import ChatOpenAI

# Initialize model
llm = ChatOpenAI(model="computer-use-preview", truncation="auto")

# Bind computer-use tool
tool = {
    "type": "computer_use_preview",
    "display_width": 1024,
    "display_height": 768,
    "environment": "browser",
}
llm_with_tools = llm.bind_tools([tool])

# Construct input message
input_message = {
    "role": "user",
    "content": [
        {
            "type": "text",
            "text": (
                "Click the red X to close and reveal my Desktop. "
                "Proceed, no confirmation needed."
            ),
        },
        {
            "type": "input_image",
            "image_url": f"data:image/png;base64,{screenshot_1_base64}",
        },
    ],
}

# Invoke model
response = llm_with_tools.invoke(
    [input_message],
    reasoning={
        "generate_summary": "concise",
    },
)
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model="gpt-4.1-mini",
    include=["code_interpreter_call.outputs"],  # optionally include outputs
)

llm_with_tools = llm.bind_tools(
    [
        {
            "type": "code_interpreter",
            # Create a new container
            "container": {"type": "auto"},
        }
    ]
)

response = llm_with_tools.invoke(
    "Write and run code to answer the question: what is 3^3?"
)
请注意,上述命令创建了一个新容器。我们也可以指定现有容器 ID:
# Recover the container ID from the previous response so it can be reused.
code_interpreter_calls = [
    item
    for item in response.content
    if item["type"] == "code_interpreter_call"
]
assert len(code_interpreter_calls) == 1
container_id = code_interpreter_calls[0]["extras"]["container_id"]

llm_with_tools = llm.bind_tools(
    [
        {
            "type": "code_interpreter",
            # Use an existing container
            "container": container_id,
        }
    ]
)
from langchain_openai import ChatOpenAI

reasoning = {
    "effort": "medium",  # 'low', 'medium', or 'high'
    "summary": "auto",  # 'detailed', 'auto', or None
}

llm = ChatOpenAI(model="gpt-5-nano", reasoning=reasoning)
response = llm.invoke("What is 3^3?")

# Output
response.text
'3³ = 3 × 3 × 3 = 27.'
# Print the model's reasoning summaries from the response content blocks.
for block in response.content_blocks:
    if block["type"] != "reasoning":
        continue
    print(block["reasoning"])
**Calculating the power of three**The user is asking about 3 raised to the power of 3. That's a pretty simple calculation! I know that 3^3 equals 27, so I can say, "3 to the power of 3 equals 27." I might also include a quick explanation that it's 3 multiplied by itself three times: 3 × 3 × 3 = 27. So, the answer is definitely 27.
# Predicted-output example: pass the original code as a prediction so the
# model can reuse unchanged spans and respond faster.
code = """/// <summary>
/// Represents a user with a first name, last name, and username.
/// </summary>
public class User
{
    /// <summary>
    /// Gets or sets the user's first name.
    /// </summary>
    public string FirstName { get; set; }

    /// <summary>
    /// Gets or sets the user's last name.
    /// </summary>
    public string LastName { get; set; }

    /// <summary>
    /// Gets or sets the user's username.
    /// </summary>
    public string Username { get; set; }
}
"""

llm = ChatOpenAI(model="gpt-4.1")

query = (
    "Replace the Username property with an Email property. "
    "Respond only with code, and with no markdown formatting."
)

response = llm.invoke(
    [{"role": "user", "content": query}, {"role": "user", "content": code}],
    prediction={"type": "content", "content": code},
)

print(response.content)
print(response.response_metadata)
/// <summary>/// Represents a user with a first name, last name, and email./// </summary>public class User{ /// <summary> /// Gets or sets the user's first name. /// </summary> public string FirstName { get; set; } /// <summary> /// Gets or sets the user's last name. /// </summary> public string LastName { get; set; } /// <summary> /// Gets or sets the user's email. /// </summary> public string Email { get; set; }}{'token_usage': {'completion_tokens': 226, 'prompt_tokens': 166, 'total_tokens': 392, 'completion_tokens_details': {'accepted_prediction_tokens': 49, 'audio_tokens': None, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 107}, 'prompt_tokens_details': {'audio_tokens': None, 'cached_tokens': 0}}, 'model_name': 'gpt-4o-2024-08-06', 'system_fingerprint': 'fp_45cf54deae', 'finish_reason': 'stop', 'logprobs': None}
# Continue the conversation: prior turns plus the model's earlier reply.
history = [
    ("human", "Are you made by OpenAI? Just answer yes or no"),
    output_message,
    ("human", "And what is your name? Just give your name."),
]
second_output_message = llm.invoke(history)
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(model="gpt-4.1")

# Use a cache key for repeated prompts
messages = [
    {
        "role": "system",
        "content": "You are a helpful assistant that translates English to French.",
    },
    {"role": "user", "content": "I love programming."},
]

response = llm.invoke(messages, prompt_cache_key="translation-assistant-v1")

# Check cache usage.
# Fix: `usage_metadata` is a TypedDict (and may be None), so its fields must be
# read with subscript/.get access — attribute access raises AttributeError.
usage = response.usage_metadata or {}
cache_read_tokens = usage.get("input_token_details", {}).get("cache_read", 0)
print(f"Cached tokens used: {cache_read_tokens}")