Module: LLM::OpenAI::ResponseParser

Defined in:
lib/llm/providers/openai/response_parser.rb

Instance Method Details

#parse_completion(body) ⇒ Hash

Parameters:

  • body (Hash)

    The response body from the LLM provider

Returns:

  • (Hash)

    A hash with :model, :choices, :prompt_tokens, :completion_tokens and :total_tokens keys


# File 'lib/llm/providers/openai/response_parser.rb', line 24

def parse_completion(body)
  {
    model: body["model"],
    choices: body["choices"].map.with_index do
      extra = {
        index: _2, response: self,
        logprobs: _1["logprobs"]
      }
      LLM::Message.new(*_1["message"].values_at("role", "content"), extra)
    end,
    prompt_tokens: body.dig("usage", "prompt_tokens"),
    completion_tokens: body.dig("usage", "completion_tokens"),
    total_tokens: body.dig("usage", "total_tokens")
  }
end
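
For illustration, a minimal sketch of the input this method expects and the hash it returns, using a hypothetical, trimmed-down chat completion body (the model name and token counts are made up):

body = {
  "model" => "gpt-4o-mini",
  "choices" => [
    {"message" => {"role" => "assistant", "content" => "Hello!"}, "logprobs" => nil}
  ],
  "usage" => {"prompt_tokens" => 5, "completion_tokens" => 2, "total_tokens" => 7}
}
# parse_completion(body) would produce a hash along the lines of:
# {model: "gpt-4o-mini", choices: [#<LLM::Message role="assistant" ...>],
#  prompt_tokens: 5, completion_tokens: 2, total_tokens: 7}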

#parse_embedding(body) ⇒ Hash

Parameters:

  • body (Hash)

    The response body from the LLM provider

Returns:

  • (Hash)

    A hash with :model, :embeddings, :prompt_tokens and :total_tokens keys


# File 'lib/llm/providers/openai/response_parser.rb', line 11

def parse_embedding(body)
  {
    model: body["model"],
    embeddings: body["data"].map { _1["embedding"] },
    prompt_tokens: body.dig("usage", "prompt_tokens"),
    total_tokens: body.dig("usage", "total_tokens")
  }
end
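
A comparable sketch for embeddings, again with a hypothetical, trimmed-down response body:

body = {
  "model" => "text-embedding-3-small",
  "data" => [{"embedding" => [0.1, -0.2, 0.3]}],
  "usage" => {"prompt_tokens" => 3, "total_tokens" => 3}
}
# parse_embedding(body) would produce:
# {model: "text-embedding-3-small", embeddings: [[0.1, -0.2, 0.3]],
#  prompt_tokens: 3, total_tokens: 3}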

#parse_image(body) ⇒ Hash

Parameters:

  • body (Hash)

    The response body from the LLM provider

Returns:

  • (Hash)

    A hash with :urls and :images keys


# File 'lib/llm/providers/openai/response_parser.rb', line 67

def parse_image(body)
  {
    urls: body["data"].filter_map { _1["url"] },
    images: body["data"].filter_map do
      next unless _1["b64_json"]
      StringIO.new(_1["b64_json"].unpack1("m0"))
    end
  }
end
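
A sketch for image responses. The parser keeps "url" entries as-is and base64-decodes "b64_json" entries into StringIO objects; the body below is hypothetical:

require "stringio"
require "base64"

body = {
  "data" => [
    {"url" => "https://example.com/image.png"},
    {"b64_json" => Base64.strict_encode64("fake image bytes")}
  ]
}
# parse_image(body) would produce:
# {urls: ["https://example.com/image.png"], images: [#<StringIO>]}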

#parse_output_response(body) ⇒ Hash

Parameters:

  • body (Hash)

    The response body from the LLM provider

Returns:

  • (Hash)

    A hash with :id, :model, :input_tokens, :output_tokens, :total_tokens and :outputs keys


# File 'lib/llm/providers/openai/response_parser.rb', line 44

def parse_output_response(body)
  {
    id: body["id"],
    model: body["model"],
    input_tokens: body.dig("usage", "input_tokens"),
    output_tokens: body.dig("usage", "output_tokens"),
    total_tokens: body.dig("usage", "total_tokens"),
    outputs: body["output"].filter_map.with_index do |output, index|
      next unless output["content"]
      extra = {
        index:, response: self,
        contents: output["content"],
        annotations: output["annotations"]
      }
      LLM::Message.new(output["role"], text(output), extra)
    end
  }
end
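
A sketch for the Responses API output. The text(output) helper is defined elsewhere in the provider and is assumed here to extract the text from output["content"]; the body and its content shape are hypothetical:

body = {
  "id" => "resp_123",
  "model" => "gpt-4o-mini",
  "usage" => {"input_tokens" => 8, "output_tokens" => 4, "total_tokens" => 12},
  "output" => [
    {"role" => "assistant",
     "content" => [{"type" => "output_text", "text" => "Hello!"}],
     "annotations" => []}
  ]
}
# parse_output_response(body) would produce:
# {id: "resp_123", model: "gpt-4o-mini", input_tokens: 8, output_tokens: 4,
#  total_tokens: 12, outputs: [#<LLM::Message role="assistant" ...>]}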