Skip to content

Commit 874c7e4

Browse files
authored
Improved logging (#796)
* Simplify how we log stuff and get rid of the ContextualLogger * Allow configuring the logger destination * Use Langchain logger on OpenAI client requests * Reduce verbosity of logger by replacing info msgs with debug * Use Langchain logger on Google LLMs http requests * Reuse logger formatter * Revert unwanted changes * Update changelog
1 parent 44417ff commit 874c7e4

27 files changed

Lines changed: 191 additions & 270 deletions

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
## [Unreleased]
22
- Deprecate Langchain::LLM::GooglePalm
33
- Allow setting response_object: {} parameter when initializing supported Langchain::LLM::* classes
4+
- Simplify and consolidate logging for some of the LLM providers (namely OpenAI and Google). Now most of the HTTP requests are being logged when on DEBUG level
5+
- Improve doc on how to set up a custom logger with a custom destination
46

57
## [0.16.0] - 2024-09-19
68
- Remove `Langchain::Thread` class as it was not needed.

Gemfile.lock

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@ PATH
66
json-schema (~> 4)
77
matrix
88
pragmatic_segmenter (~> 0.3.0)
9-
rainbow (~> 3.1.0)
109
zeitwerk (~> 2.5)
1110

1211
GEM

README.md

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -626,11 +626,18 @@ Additional examples available: [/examples](https://github.com/andreibondarev/lan
626626

627627
## Logging
628628

629-
Langchain.rb uses standard logging mechanisms and defaults to `:warn` level. Most messages are at info level, but we will add debug or warn statements as needed.
629+
Langchain.rb uses the standard Ruby [Logger](https://ruby-doc.org/stdlib-2.4.0/libdoc/logger/rdoc/Logger.html) mechanism and defaults to the standard `level` value (currently `Logger::DEBUG`).
630+
630631
To show all log messages:
631632

632633
```ruby
633-
Langchain.logger.level = :debug
634+
Langchain.logger.level = Logger::DEBUG
635+
```
636+
637+
The logger logs to `STDOUT` by default. In order to configure the log destination (i.e. log to a file) do:
638+
639+
```ruby
640+
Langchain.logger = Logger.new("path/to/file", **Langchain::LOGGER_OPTIONS)
634641
```
635642

636643
## Problems

langchain.gemspec

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,6 @@ Gem::Specification.new do |spec|
2828
# dependencies
2929
# Not sure if we should require this as it only applies to OpenAI usecase.
3030
spec.add_dependency "baran", "~> 0.1.9"
31-
spec.add_dependency "rainbow", "~> 3.1.0"
3231
spec.add_dependency "json-schema", "~> 4"
3332
spec.add_dependency "zeitwerk", "~> 2.5"
3433
spec.add_dependency "pragmatic_segmenter", "~> 0.3.0"

lib/langchain.rb

Lines changed: 47 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22

33
require "logger"
44
require "pathname"
5-
require "rainbow"
65
require "zeitwerk"
76
require "uri"
87
require "json"
@@ -92,24 +91,58 @@
9291
# Langchain.logger.level = :info
9392
module Langchain
9493
class << self
95-
# @return [ContextualLogger]
96-
attr_reader :logger
97-
98-
# @param logger [Logger]
99-
# @return [ContextualLogger]
100-
def logger=(logger)
101-
@logger = ContextualLogger.new(logger)
102-
end
103-
94+
# @return [Logger]
95+
attr_accessor :logger
10496
# @return [Pathname]
10597
attr_reader :root
10698
end
10799

108-
self.logger ||= ::Logger.new($stdout, level: :debug)
109-
110-
@root = Pathname.new(__dir__)
111-
112100
module Errors
113101
class BaseError < StandardError; end
114102
end
103+
104+
module Colorizer
105+
class << self
106+
def red(str)
107+
"\e[31m#{str}\e[0m"
108+
end
109+
110+
def green(str)
111+
"\e[32m#{str}\e[0m"
112+
end
113+
114+
def yellow(str)
115+
"\e[33m#{str}\e[0m"
116+
end
117+
118+
def blue(str)
119+
"\e[34m#{str}\e[0m"
120+
end
121+
122+
def colorize_logger_msg(msg, severity)
123+
return msg unless msg.is_a?(String)
124+
125+
return red(msg) if severity.to_sym == :ERROR
126+
return yellow(msg) if severity.to_sym == :WARN
127+
msg
128+
end
129+
end
130+
end
131+
132+
LOGGER_OPTIONS = {
133+
progname: "Langchain.rb",
134+
135+
formatter: ->(severity, time, progname, msg) do
136+
Logger::Formatter.new.call(
137+
severity,
138+
time,
139+
"[#{progname}]",
140+
Colorizer.colorize_logger_msg(msg, severity)
141+
)
142+
end
143+
}.freeze
144+
145+
self.logger ||= ::Logger.new($stdout, **LOGGER_OPTIONS)
146+
147+
@root = Pathname.new(__dir__)
115148
end

lib/langchain/assistants/assistant.rb

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ def add_messages(messages:)
122122
# @return [Array<Langchain::Message>] The messages
123123
def run(auto_tool_execution: false)
124124
if messages.empty?
125-
Langchain.logger.warn("No messages to process")
125+
Langchain.logger.warn("#{self.class} - No messages to process")
126126
@state = :completed
127127
return
128128
end
@@ -272,7 +272,7 @@ def process_latest_message
272272
#
273273
# @return [Symbol] The completed state
274274
def handle_system_message
275-
Langchain.logger.warn("At least one user message is required after a system message")
275+
Langchain.logger.warn("#{self.class} - At least one user message is required after a system message")
276276
:completed
277277
end
278278

@@ -287,7 +287,7 @@ def handle_llm_message
287287
#
288288
# @return [Symbol] The failed state
289289
def handle_unexpected_message
290-
Langchain.logger.error("Unexpected message role encountered: #{messages.last.standard_role}")
290+
Langchain.logger.error("#{self.class} - Unexpected message role encountered: #{messages.last.standard_role}")
291291
:failed
292292
end
293293

@@ -311,7 +311,7 @@ def set_state_for(response:)
311311
elsif response.completion # Currently only used by Ollama
312312
:completed
313313
else
314-
Langchain.logger.error("LLM response does not contain tool calls, chat or completion response")
314+
Langchain.logger.error("#{self.class} - LLM response does not contain tool calls, chat or completion response")
315315
:failed
316316
end
317317
end
@@ -323,7 +323,7 @@ def execute_tools
323323
run_tools(messages.last.tool_calls)
324324
:in_progress
325325
rescue => e
326-
Langchain.logger.error("Error running tools: #{e.message}; #{e.backtrace.join('\n')}")
326+
Langchain.logger.error("#{self.class} - Error running tools: #{e.message}; #{e.backtrace.join('\n')}")
327327
:failed
328328
end
329329

@@ -355,7 +355,7 @@ def initialize_instructions
355355
#
356356
# @return [Langchain::LLM::BaseResponse] The LLM response object
357357
def chat_with_llm
358-
Langchain.logger.info("Sending a call to #{llm.class}", for: self.class)
358+
Langchain.logger.debug("#{self.class} - Sending a call to #{llm.class}")
359359

360360
params = @llm_adapter.build_chat_params(
361361
instructions: @instructions,

lib/langchain/contextual_logger.rb

Lines changed: 0 additions & 68 deletions
This file was deleted.

lib/langchain/llm/google_gemini.rb

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -59,15 +59,7 @@ def chat(params = {})
5959

6060
uri = URI("https://generativelanguage.googleapis.com/v1beta/models/#{parameters[:model]}:generateContent?key=#{api_key}")
6161

62-
request = Net::HTTP::Post.new(uri)
63-
request.content_type = "application/json"
64-
request.body = parameters.to_json
65-
66-
response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
67-
http.request(request)
68-
end
69-
70-
parsed_response = JSON.parse(response.body)
62+
parsed_response = http_post(uri, parameters)
7163

7264
wrapped_response = Langchain::LLM::GoogleGeminiResponse.new(parsed_response, model: parameters[:model])
7365

@@ -95,17 +87,25 @@ def embed(
9587

9688
uri = URI("https://generativelanguage.googleapis.com/v1beta/models/#{model}:embedContent?key=#{api_key}")
9789

98-
request = Net::HTTP::Post.new(uri)
90+
parsed_response = http_post(uri, params)
91+
92+
Langchain::LLM::GoogleGeminiResponse.new(parsed_response, model: model)
93+
end
94+
95+
private
96+
97+
def http_post(url, params)
98+
http = Net::HTTP.new(url.hostname, url.port)
99+
http.use_ssl = url.scheme == "https"
100+
http.set_debug_output(Langchain.logger) if Langchain.logger.debug?
101+
102+
request = Net::HTTP::Post.new(url)
99103
request.content_type = "application/json"
100104
request.body = params.to_json
101105

102-
response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
103-
http.request(request)
104-
end
105-
106-
parsed_response = JSON.parse(response.body)
106+
response = http.request(request)
107107

108-
Langchain::LLM::GoogleGeminiResponse.new(parsed_response, model: model)
108+
JSON.parse(response.body)
109109
end
110110
end
111111
end

lib/langchain/llm/google_vertex_ai.rb

Lines changed: 19 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -63,16 +63,7 @@ def embed(
6363

6464
uri = URI("#{url}#{model}:predict")
6565

66-
request = Net::HTTP::Post.new(uri)
67-
request.content_type = "application/json"
68-
request["Authorization"] = "Bearer #{@authorizer.fetch_access_token!["access_token"]}"
69-
request.body = params.to_json
70-
71-
response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
72-
http.request(request)
73-
end
74-
75-
parsed_response = JSON.parse(response.body)
66+
parsed_response = http_post(uri, params)
7667

7768
Langchain::LLM::GoogleGeminiResponse.new(parsed_response, model: model)
7869
end
@@ -96,16 +87,7 @@ def chat(params = {})
9687

9788
uri = URI("#{url}#{parameters[:model]}:generateContent")
9889

99-
request = Net::HTTP::Post.new(uri)
100-
request.content_type = "application/json"
101-
request["Authorization"] = "Bearer #{@authorizer.fetch_access_token!["access_token"]}"
102-
request.body = parameters.to_json
103-
104-
response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: uri.scheme == "https") do |http|
105-
http.request(request)
106-
end
107-
108-
parsed_response = JSON.parse(response.body)
90+
parsed_response = http_post(uri, parameters)
10991

11092
wrapped_response = Langchain::LLM::GoogleGeminiResponse.new(parsed_response, model: parameters[:model])
11193

@@ -115,5 +97,22 @@ def chat(params = {})
11597
raise StandardError.new(parsed_response)
11698
end
11799
end
100+
101+
private
102+
103+
def http_post(url, params)
104+
http = Net::HTTP.new(url.hostname, url.port)
105+
http.use_ssl = url.scheme == "https"
106+
http.set_debug_output(Langchain.logger) if Langchain.logger.debug?
107+
108+
request = Net::HTTP::Post.new(url)
109+
request.content_type = "application/json"
110+
request["Authorization"] = "Bearer #{@authorizer.fetch_access_token!["access_token"]}"
111+
request.body = params.to_json
112+
113+
response = http.request(request)
114+
115+
JSON.parse(response.body)
116+
end
118117
end
119118
end

lib/langchain/llm/openai.rb

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,11 @@ class OpenAI < Base
3333
def initialize(api_key:, llm_options: {}, default_options: {})
3434
depends_on "ruby-openai", req: "openai"
3535

36-
@client = ::OpenAI::Client.new(access_token: api_key, **llm_options, log_errors: true)
36+
llm_options[:log_errors] = Langchain.logger.debug? unless llm_options.key?(:log_errors)
37+
38+
@client = ::OpenAI::Client.new(access_token: api_key, **llm_options) do |f|
39+
f.response :logger, Langchain.logger, {headers: true, bodies: true, errors: true}
40+
end
3741

3842
@defaults = DEFAULTS.merge(default_options)
3943
chat_parameters.update(

0 commit comments

Comments
 (0)