Class: LLM::Gemini

Inherits:
Provider
Includes:
Format
Defined in:
lib/llm/providers/gemini.rb,
lib/llm/providers/gemini/audio.rb,
lib/llm/providers/gemini/files.rb,
lib/llm/providers/gemini/format.rb,
lib/llm/providers/gemini/images.rb,
lib/llm/providers/gemini/models.rb,
lib/llm/providers/gemini/error_handler.rb,
lib/llm/providers/gemini/stream_parser.rb

Overview

The Gemini class implements a provider for [Gemini](https://ai.google.dev/).

The Gemini provider accepts multiple kinds of input (text, images, audio, and video). Inputs can be provided inline via the prompt for files under 20MB, or through the Gemini Files API for files over 20MB (example #3 below sketches the Files API route).

Examples:

example #1

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
bot = LLM::Bot.new(llm)
bot.chat LLM.File("/images/capybara.png")
bot.chat "Describe the image"
bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }

example #2

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
bot = LLM::Bot.new(llm)
bot.chat ["Describe the image", LLM::File("/images/capybara.png")]
bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
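
example #3

A sketch of the Files API route for inputs over 20MB. The LLM::Gemini::Files#create signature and the ability to pass the returned file object in a prompt are assumptions, not confirmed by this page:

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
bot = LLM::Bot.new(llm)
file = llm.files.create(file: "/videos/capybara.mp4") # assumed keyword argument
bot.chat ["Describe the video", file]
bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }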

Defined Under Namespace

Modules: Format, Response
Classes: Audio, ErrorHandler, Files, Images, Models, StreamParser

Constant Summary

HOST =
"generativelanguage.googleapis.com"

Instance Method Summary

Methods included from Format

#format

Methods inherited from Provider

#chat, #chat!, #inspect, #moderations, #respond, #respond!, #responses, #schema, #vector_stores, #with

Constructor Details

#initialize ⇒ Gemini

Returns a new instance of Gemini.

Parameters:

  • key (String, nil)

    The secret key for authentication



# File 'lib/llm/providers/gemini.rb', line 48

def initialize(**)
  super(host: HOST, **)
end

Instance Method Details

#assistant_role ⇒ String

Returns the role of the assistant in the conversation. Usually “assistant” or “model”

Returns:

  • (String)

    Returns the role of the assistant in the conversation. Usually “assistant” or “model”



# File 'lib/llm/providers/gemini.rb', line 124

def assistant_role
  "model"
end

#audio ⇒ Object

Provides an interface to Gemini’s audio API

See Also:

  • LLM::Gemini::Audio


# File 'lib/llm/providers/gemini.rb', line 96

def audio
  LLM::Gemini::Audio.new(self)
end

#complete(prompt, params = {}) ⇒ LLM::Response

Provides an interface to the chat completions API

Examples:

llm = LLM.gemini(ENV["KEY"])
messages = [{role: "system", content: "Your task is to answer all of my questions"}]
res = llm.complete("5 + 2 ?", messages:)
print "[#{res.choices[0].role}]", res.choices[0].content, "\n"

Parameters:

  • prompt (String)

    The input prompt to be completed

  • params (Hash) (defaults to: {})

    The parameters to maintain throughout the conversation. Any parameter the provider supports can be included, not just those listed here.

Returns:

  • (LLM::Response)
Raises:

See Also:



# File 'lib/llm/providers/gemini.rb', line 78

def complete(prompt, params = {})
  params = {role: :user, model: default_model}.merge!(params)
  params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
  role, model, stream = [:role, :model, :stream].map { params.delete(_1) }
  action = stream ? "streamGenerateContent?key=#{@key}&alt=sse" : "generateContent?key=#{@key}"
  model = model.respond_to?(:id) ? model.id : model # normalize model objects to their id
  path = ["/v1beta/models/#{model}", action].join(":")
  req  = Net::HTTP::Post.new(path, headers)
  messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
  body = JSON.dump({contents: format(messages)}.merge!(params))
  set_body_stream(req, StringIO.new(body))
  res = execute(request: req, stream:)
  LLM::Response.new(res).extend(LLM::Gemini::Response::Completion)
end
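
A Gemini-flavored sketch of the same call; it mirrors the example above, and the model name matches #default_model below:

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
res = llm.complete("5 + 2 ?", model: "gemini-2.5-flash")
print "[#{res.choices[0].role}]", res.choices[0].content, "\n"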

#default_model ⇒ String

Returns the default model for chat completions

Returns:

  • (String)

See Also:



# File 'lib/llm/providers/gemini.rb', line 132

def default_model
  "gemini-2.5-flash"
end

#embed(input, model: "text-embedding-004", **params) ⇒ LLM::Response

Provides an embedding

Parameters:

  • input (String, Array<String>)

    The input to embed

  • model (String) (defaults to: "text-embedding-004")

    The embedding model to use

  • params (Hash)

    Other embedding parameters

Returns:

  • (LLM::Response)


# File 'lib/llm/providers/gemini.rb', line 59

def embed(input, model: "text-embedding-004", **params)
  model = model.respond_to?(:id) ? model.id : model
  path = ["/v1beta/models/#{model}", "embedContent?key=#{@key}"].join(":")
  req = Net::HTTP::Post.new(path, headers)
  req.body = JSON.dump({content: {parts: [{text: input}]}})
  res = execute(request: req)
  LLM::Response.new(res).extend(LLM::Gemini::Response::Embedding)
end
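
A minimal sketch of an embedding call; the #embeddings reader on the returned response is an assumption based on the Embedding response module, not confirmed by this page:

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
res = llm.embed("The quick brown fox")
print res.embeddings.size, "\n" # assumed reader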

#files ⇒ Object

Provides an interface to Gemini’s file management API

See Also:

  • LLM::Gemini::Files


# File 'lib/llm/providers/gemini.rb', line 111

def files
  LLM::Gemini::Files.new(self)
end

#images ⇒ see LLM::Gemini::Images

Provides an interface to Gemini’s image generation API

Returns:

See Also:

  • LLM::Gemini::Images


# File 'lib/llm/providers/gemini.rb', line 104

def images
  LLM::Gemini::Images.new(self)
end
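
A hedged usage sketch; the #create method and its prompt: keyword on LLM::Gemini::Images are assumptions, not confirmed by this page:

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
res = llm.images.create(prompt: "A capybara floating on a lake") # assumed method and keyword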

#models ⇒ Object

Provides an interface to Gemini’s models API

See Also:

  • LLM::Gemini::Models


# File 'lib/llm/providers/gemini.rb', line 118

def models
  LLM::Gemini::Models.new(self)
end
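
A short sketch for listing models; the #all method and the #id reader on the returned entries are assumptions, not confirmed by this page:

#!/usr/bin/env ruby
require "llm"

llm = LLM.gemini(ENV["KEY"])
llm.models.all.each { print _1.id, "\n" } # assumed #all and #id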