For authentication details and the list of available models, see Overview.

Installation

pip install langchain-openai

Code Examples

import os
from langchain_openai import ChatOpenAI

# Point the OpenAI-compatible client at the PipeLLM endpoint.
llm = ChatOpenAI(
  model='gpt-4.1',
  api_key=os.getenv('PIPELLM_API_KEY'),
  base_url='https://api.pipellm.com/v1'
)

# Messages can be passed as (role, content) tuples.
response = llm.invoke([
  ('user', 'Why is the sky blue?')
])

print(response.content)
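The model also works as a drop-in component in a LangChain runnable chain. The snippet below is a minimal sketch, assuming the same PipeLLM endpoint and PIPELLM_API_KEY variable, that pipes a prompt template into the model and parses the reply to a string; the system prompt text and the question placeholder are illustrative choices, not part of the PipeLLM API.

import os
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
  model='gpt-4.1',
  api_key=os.getenv('PIPELLM_API_KEY'),
  base_url='https://api.pipellm.com/v1'
)

# Compose prompt -> model -> parser with the LCEL pipe operator.
prompt = ChatPromptTemplate.from_messages([
  ('system', 'You are a concise science explainer.'),
  ('user', '{question}')
])
chain = prompt | llm | StrOutputParser()

print(chain.invoke({'question': 'Why is the sky blue?'}))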

Streaming

import os
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
  model='gpt-4.1',
  api_key=os.getenv('PIPELLM_API_KEY'),
  base_url='https://api.pipellm.com/v1',
  streaming=True
)

# Print each token as it arrives, without buffering.
for chunk in llm.stream('Tell me a story'):
  print(chunk.content, end='', flush=True)
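In async applications, the same output can be consumed without blocking the event loop. The sketch below assumes the same endpoint and environment variable as above and uses ChatOpenAI's astream method, which yields the same chunks as stream.

import asyncio
import os
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
  model='gpt-4.1',
  api_key=os.getenv('PIPELLM_API_KEY'),
  base_url='https://api.pipellm.com/v1',
  streaming=True
)

async def main():
  # astream is the async counterpart of stream.
  async for chunk in llm.astream('Tell me a story'):
    print(chunk.content, end='', flush=True)

asyncio.run(main())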

Function Calling

import os
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool

# @tool turns the function into a LangChain tool; the docstring is sent to the model as its description.
@tool
def get_weather(location: str) -> str:
  """Get the weather for a location."""
  return f"Weather in {location}: 22°C, Sunny"

llm = ChatOpenAI(
  model='gpt-4.1',
  api_key=os.getenv('PIPELLM_API_KEY'),
  base_url='https://api.pipellm.com/v1'
)

# bind_tools attaches the tool schema so the model can request a call.
llm_with_tools = llm.bind_tools([get_weather])
response = llm_with_tools.invoke("What's the weather in Tokyo?")

# Each requested call (name, args, id) is exposed on tool_calls.
print(response.tool_calls)
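The snippet above only prints the requested call. To return the tool's result to the model, the usual LangChain pattern is to run the tool, append a ToolMessage, and invoke the model again. The sketch below continues from the variables defined above (get_weather, llm_with_tools, response); the message wiring is an illustration of that pattern rather than part of the PipeLLM docs.

from langchain_core.messages import HumanMessage, ToolMessage

# Keep the conversation so far: the user question and the model's tool request.
messages = [HumanMessage("What's the weather in Tokyo?"), response]

# Run each requested tool and feed its output back as a ToolMessage.
for tool_call in response.tool_calls:
  result = get_weather.invoke(tool_call['args'])
  messages.append(ToolMessage(content=result, tool_call_id=tool_call['id']))

# Ask the model again, now that it can see the tool results.
final = llm_with_tools.invoke(messages)
print(final.content)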