Class: LLM::OpenAI

Inherits:
Provider show all
Defined in:
lib/llm/providers/openai.rb,
lib/llm/providers/openai/audio.rb,
lib/llm/providers/openai/files.rb,
lib/llm/providers/openai/format.rb,
lib/llm/providers/openai/images.rb,
lib/llm/providers/openai/models.rb,
lib/llm/providers/openai/responses.rb,
lib/llm/providers/openai/moderations.rb,
lib/llm/providers/openai/error_handler.rb,
lib/llm/providers/openai/stream_parser.rb,
lib/llm/providers/openai/vector_stores.rb

Overview

The OpenAI class implements a provider for OpenAI.

Examples:

#!/usr/bin/env ruby
require "llm"

llm = LLM.openai(key: ENV["KEY"])
bot = LLM::Bot.new(llm)
bot.chat ["Tell me about this photo", File.open("/images/capybara.jpg", "rb")]
bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }

Direct Known Subclasses

DeepSeek, LlamaCpp

Defined Under Namespace

Modules: Response Classes: Audio, Files, Images, Models, Moderations, Responses, VectorStores

Constant Summary collapse

HOST =
"api.openai.com"

Instance Method Summary collapse

Methods inherited from Provider

#chat, #chat!, #inspect, #respond, #respond!, #schema, #with

Constructor Details

#initialize ⇒ OpenAI

Returns a new instance of OpenAI.

Parameters:

  • key (String, nil)

    The secret key for authentication



36
37
38
# File 'lib/llm/providers/openai.rb', line 36

# Builds a provider instance pinned to the OpenAI API host.
# All keyword options (e.g. +key+) are forwarded to Provider#initialize.
# @param key [String, nil] The secret key for authentication
def initialize(**kwargs)
  super(host: HOST, **kwargs)
end

Instance Method Details

#default_model ⇒ String

Returns the default model for chat completions

Returns:

  • (String)

See Also:



145
146
147
# File 'lib/llm/providers/openai.rb', line 145

# The model used for chat completions when the caller does not choose one.
# @return [String]
def default_model = "gpt-4.1"

#embed(input, model: "text-embedding-3-small", **params) ⇒ LLM::Response

Provides an embedding

Parameters:

  • input (String, Array<String>)

    The input to embed

  • model (String) (defaults to: "text-embedding-3-small")

    The embedding model to use

  • params (Hash)

    Other embedding parameters

Returns:

See Also:



48
49
50
51
52
53
# File 'lib/llm/providers/openai.rb', line 48

# Requests an embedding from the /v1/embeddings endpoint.
# Caller-supplied +params+ take precedence over +input+ / +model+.
# @param input [String, Array<String>] The input to embed
# @param model [String] The embedding model to use
# @param params [Hash] Other embedding parameters
# @return [LLM::Response]
def embed(input, model: "text-embedding-3-small", **params)
  payload = {input:, model:}.merge(params)
  request = Net::HTTP::Post.new("/v1/embeddings", headers)
  request.body = JSON.dump(payload)
  response = execute(request: request)
  LLM::Response.new(response).extend(LLM::OpenAI::Response::Embedding)
end

#complete(prompt, params = {}) ⇒ LLM::Response

Provides an interface to the chat completions API

Examples:

llm = LLM.openai(key: ENV["KEY"])
messages = [{role: "system", content: "Your task is to answer all of my questions"}]
res = llm.complete("5 + 2 ?", messages:)
print "[#{res.choices[0].role}]", res.choices[0].content, "\n"

Parameters:

  • prompt (String)

    The input prompt to be completed

  • params (Hash) (defaults to: {})

    The parameters to maintain throughout the conversation. Any parameter the provider supports can be included and not only those listed here.

Returns:

Raises:

See Also:



65
66
67
68
69
70
71
72
73
74
75
76
# File 'lib/llm/providers/openai.rb', line 65

# Sends `prompt` to the chat completions endpoint and returns the response.
# NOTE(review): caller-supplied params override the role/model defaults below.
def complete(prompt, params = {})
  # Seed defaults; merge! lets the caller's params win on key collision.
  params = {role: :user, model: default_model}.merge!(params)
  # Fold in the formatted schema/tools hashes, then drop nil-valued keys.
  params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
  # role/stream are consumed here and must not reach the request body as-is.
  role, stream = params.delete(:role), params.delete(:stream)
  # Streaming is on when the caller passed `true` or an IO-like sink (responds to #<<).
  params[:stream] = true if stream.respond_to?(:<<) || stream == true
  req = Net::HTTP::Post.new("/v1/chat/completions", headers)
  # Append the new prompt (as a Message) after any caller-provided history.
  messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
  body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
  set_body_stream(req, StringIO.new(body))
  res = execute(request: req, stream:)
  LLM::Response.new(res).extend(LLM::OpenAI::Response::Completion)
end

#responses ⇒ LLM::OpenAI::Responses

Provides an interface to OpenAI’s response API

Returns:

See Also:



82
83
84
# File 'lib/llm/providers/openai.rb', line 82

# A new interface to OpenAI's responses API (fresh instance per call).
# @return [LLM::OpenAI::Responses]
def responses = LLM::OpenAI::Responses.new(self)

#images ⇒ LLM::OpenAI::Images

Provides an interface to OpenAI’s image generation API

Returns:

See Also:



90
91
92
# File 'lib/llm/providers/openai.rb', line 90

# A new interface to OpenAI's image generation API (fresh instance per call).
# @return [LLM::OpenAI::Images]
def images = LLM::OpenAI::Images.new(self)

#audio ⇒ LLM::OpenAI::Audio

Provides an interface to OpenAI’s audio generation API

Returns:

See Also:



98
99
100
# File 'lib/llm/providers/openai.rb', line 98

# A new interface to OpenAI's audio generation API (fresh instance per call).
# @return [LLM::OpenAI::Audio]
def audio = LLM::OpenAI::Audio.new(self)

#files ⇒ LLM::OpenAI::Files

Provides an interface to OpenAI’s files API

Returns:

See Also:



106
107
108
# File 'lib/llm/providers/openai.rb', line 106

# A new interface to OpenAI's files API (fresh instance per call).
# @return [LLM::OpenAI::Files]
def files = LLM::OpenAI::Files.new(self)

#models ⇒ LLM::OpenAI::Models

Provides an interface to OpenAI’s models API

Returns:

See Also:



114
115
116
# File 'lib/llm/providers/openai.rb', line 114

# A new interface to OpenAI's models API (fresh instance per call).
# @return [LLM::OpenAI::Models]
def models = LLM::OpenAI::Models.new(self)

#moderations ⇒ LLM::OpenAI::Moderations

Provides an interface to OpenAI’s moderation API



123
124
125
# File 'lib/llm/providers/openai.rb', line 123

# A new interface to OpenAI's moderation API (fresh instance per call).
# @return [LLM::OpenAI::Moderations]
def moderations = LLM::OpenAI::Moderations.new(self)

#vector_stores ⇒ LLM::OpenAI::VectorStores

Provides an interface to OpenAI’s vector store API

Returns:

  • (LLM::OpenAI::VectorStores)

See Also:



131
132
133
# File 'lib/llm/providers/openai.rb', line 131

# A new interface to OpenAI's vector store API (fresh instance per call).
# @return [LLM::OpenAI::VectorStores]
def vector_stores = LLM::OpenAI::VectorStores.new(self)

#assistant_role ⇒ String

Returns the role of the assistant in the conversation. Usually “assistant” or “model”

Returns:

  • (String)

    Returns the role of the assistant in the conversation. Usually “assistant” or “model”



137
138
139
# File 'lib/llm/providers/openai.rb', line 137

# The role string attached to assistant-authored messages.
# @return [String]
def assistant_role = "assistant"