Module: LLM::OpenAI::ResponseParser
- Defined in:
- lib/llm/providers/openai/response_parser.rb
Instance Method Summary collapse
- #parse_completion(body) ⇒ Hash
- #parse_embedding(body) ⇒ Hash
- #parse_image(body) ⇒ Hash
- #parse_output_response(body) ⇒ Hash
Instance Method Details
#parse_completion(body) ⇒ Hash
24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
# File 'lib/llm/providers/openai/response_parser.rb', line 24

# Normalizes an OpenAI chat-completion payload into a plain Hash.
# Each entry of body["choices"] becomes an LLM::Message carrying its
# position, this parser (as +response+), and any logprobs.
# NOTE(review): assumes body follows the OpenAI chat-completions schema
# ("model", "choices", "usage") — confirm against the caller.
def parse_completion(body)
  usage = body["usage"] || {}
  messages = body["choices"].each_with_index.map do |choice, position|
    meta = {index: position, response: self, logprobs: choice["logprobs"]}
    role, content = choice["message"].values_at("role", "content")
    LLM::Message.new(role, content, meta)
  end
  {
    model: body["model"],
    choices: messages,
    prompt_tokens: usage["prompt_tokens"],
    completion_tokens: usage["completion_tokens"],
    total_tokens: usage["total_tokens"]
  }
end
#parse_embedding(body) ⇒ Hash
11 12 13 14 15 16 17 18 |
# File 'lib/llm/providers/openai/response_parser.rb', line 11

# Normalizes an OpenAI embeddings payload into a plain Hash.
# NOTE(review): the method name was lost in the extracted source
# ("def (body)" is not valid Ruby); restored as +parse_embedding+
# to match the documented signature #parse_embedding(body) ⇒ Hash.
#
# @param body [Hash] decoded JSON from the embeddings endpoint
# @return [Hash] :model, :embeddings (array of vectors), and token counts
def parse_embedding(body)
  {
    model: body["model"],
    embeddings: body["data"].map { _1["embedding"] },
    prompt_tokens: body.dig("usage", "prompt_tokens"),
    total_tokens: body.dig("usage", "total_tokens")
  }
end
#parse_image(body) ⇒ Hash
67 68 69 70 71 72 73 74 75 76 77 78 79 |
# File 'lib/llm/providers/openai/response_parser.rb', line 67

# Normalizes an OpenAI image-generation payload into a plain Hash.
# Hosted results surface under :urls; inline results (b64_json) are
# wrapped via OpenStruct.from_hash with both the base64 text and the
# decoded bytes (unpack1("m0") is strict base64 decoding).
def parse_image(body)
  entries = body["data"]
  urls = entries.filter_map { |entry| entry["url"] }
  images = entries.filter_map do |entry|
    encoded = entry["b64_json"]
    next unless encoded
    OpenStruct.from_hash(
      mime_type: nil,
      encoded: encoded,
      binary: encoded.unpack1("m0")
    )
  end
  {urls: urls, images: images}
end
#parse_output_response(body) ⇒ Hash
44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
# File 'lib/llm/providers/openai/response_parser.rb', line 44

# Normalizes an OpenAI Responses-API payload into a plain Hash.
# Entries of body["output"] without a "content" key are skipped; the
# rest become LLM::Message objects whose text comes from the sibling
# +text+ helper defined elsewhere in this module.
def parse_output_response(body)
  usage = body["usage"] || {}
  messages = body["output"].each_with_index.filter_map do |output, position|
    contents = output["content"]
    next unless contents
    meta = {
      index: position,
      response: self,
      contents: contents,
      annotations: output["annotations"]
    }
    LLM::Message.new(output["role"], text(output), meta)
  end
  {
    id: body["id"],
    model: body["model"],
    input_tokens: usage["input_tokens"],
    output_tokens: usage["output_tokens"],
    total_tokens: usage["total_tokens"],
    outputs: messages
  }
end