Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion lib/subscribers/openai/base.js
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ class OpenAISubscriber extends Subscriber {
}

get enabled() {
return super.enabled && this.config.ai_monitoring?.enabled
return super.enabled && this.agent.config.ai_monitoring.enabled
}

/**
Expand Down
8 changes: 8 additions & 0 deletions lib/subscribers/openai/chat.js
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I suspect there's a better way of handling this than to replicate checks in multiple methods across multiple implementations. Perhaps something like:

class Subscriber {
	enable() {
		if (this.shouldEnable === false) {
			this.logger.trace(
				{ packageName: this.packageName },
				'Not subscribing to channel events. Instrumentation is disabled.'
			)
			return
		}

		// do the real stuff
	}
}

Copy link
Contributor Author

@amychisholm03 amychisholm03 Dec 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ideally, this would be enough. However, because collect_ai / ai_monitoring can be changed server-side, we have to check the config values after the subscribers have been set up/enabled.

Also, I wanted to maintain parity with the other AIM subscribers, and I'll likely refactor this ai_monitoring check logic in #3487

Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,10 @@ class OpenAIChatCompletions extends OpenAISubscriber {
this.logger.warn(`Instrumenting chat completion streams is only supported with openai version ${MIN_STREAM_VERSION}+.`)
return ctx
}
if (!this.enabled) {
this.logger.debug('OpenAI instrumentation is disabled, not creating segment.')
return
}

const segment = this.agent.tracer.createSegment({
name: OPENAI.COMPLETION,
Expand All @@ -43,6 +47,10 @@ class OpenAIChatCompletions extends OpenAISubscriber {
}

asyncEnd(data) {
if (!this.enabled) {
this.logger.debug('OpenAI instrumentation is disabled, not recording Llm events.')
return
}
const ctx = this.agent.tracer.getContext()
if (!ctx?.segment || !ctx?.transaction) {
return
Expand Down
8 changes: 8 additions & 0 deletions lib/subscribers/openai/embeddings.js
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,10 @@ class OpenAIEmbeddings extends OpenAISubscriber {
}

handler(data, ctx) {
if (!this.enabled) {
this.logger.debug('OpenAI instrumentation is disabled, not creating segment.')
return
}
const segment = this.agent.tracer.createSegment({
name: OPENAI.EMBEDDING,
parent: ctx.segment,
Expand All @@ -28,6 +32,10 @@ class OpenAIEmbeddings extends OpenAISubscriber {
}

asyncEnd(data) {
if (!this.enabled) {
this.logger.debug('OpenAI instrumentation is disabled, not recording Llm events.')
return
}
const ctx = this.agent.tracer.getContext()
if (!ctx?.segment || !ctx?.transaction) {
return
Expand Down
32 changes: 30 additions & 2 deletions test/versioned/openai/chat-completions-res-api.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -191,6 +191,25 @@ test('responses.create', async (t) => {
assert.equal(events.length, 0, 'should not create llm events')
})

await t.test('should not create segment or llm events when ai_monitoring.enabled is false', (t, end) => {
  // Disable AI monitoring before issuing the request; the subscriber should
  // then skip both segment creation and Llm event recording.
  const agent = t.nr.agent
  const client = t.nr.client
  agent.config.ai_monitoring.enabled = false

  helper.runInTransaction(agent, async (tx) => {
    await client.responses.create({
      input: [{ role: 'user', content: 'You are a mathematician.' }]
    })

    // No custom (Llm) events should have been aggregated.
    const recordedEvents = agent.customEventAggregator.events.toArray()
    assert.equal(recordedEvents.length, 0, 'should not create llm events when ai_monitoring is disabled')

    // No child segment should hang off the transaction root either.
    const rootChildren = tx.trace.segments.root.children
    assert.equal(rootChildren.length, 0, 'should not create OpenAI completion segment')

    tx.end()
    end()
  })
})

await t.test('auth errors should be tracked', (t, end) => {
const { client, agent } = t.nr
helper.runInTransaction(agent, async (tx) => {
Expand Down Expand Up @@ -468,7 +487,7 @@ test('responses.create', async (t) => {
})

await t.test('should not create llm events when ai_monitoring.streaming.enabled is false', (t, end) => {
const { client, agent } = t.nr
const { client, agent, host, port } = t.nr
agent.config.ai_monitoring.streaming.enabled = false
helper.runInTransaction(agent, async (tx) => {
const content = 'Streamed response'
Expand All @@ -488,7 +507,16 @@ test('responses.create', async (t) => {
assert.equal(res, expectedRes.body.response.output[0].content[0].text)

const events = agent.customEventAggregator.events.toArray()
assert.equal(events.length, 0, 'should not llm events when streaming is disabled')
assert.equal(events.length, 0, 'should not create llm events when streaming is disabled')

// Should still create the OPENAI.COMPLETION segment since ai_monitoring is enabled
assertSegments(
tx.trace,
tx.trace.root,
[OPENAI.COMPLETION, [`External/${host}:${port}/responses`]],
{ exact: false }
)

const metrics = agent.metrics.getOrCreateMetric(TRACKING_METRIC)
assert.equal(metrics.callCount > 0, true)
const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT)
Expand Down
44 changes: 34 additions & 10 deletions test/versioned/openai/chat-completions.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -34,11 +34,11 @@ test('chat.completions.create', async (t) => {
ctx.nr.server = server
ctx.nr.agent = helper.instrumentMockedAgent({
ai_monitoring: {
enabled: true
enabled: true,
streaming: {
enabled: true
}
},
streaming: {
enabled: true
}
})
const OpenAI = require('openai')
ctx.nr.client = new OpenAI({
Expand Down Expand Up @@ -398,7 +398,7 @@ test('chat.completions.create', async (t) => {
})

test('should not create llm events when ai_monitoring.streaming.enabled is false', (t, end) => {
const { client, agent } = t.nr
const { client, agent, host, port } = t.nr
agent.config.ai_monitoring.streaming.enabled = false
helper.runInTransaction(agent, async (tx) => {
const content = 'Streamed response'
Expand All @@ -421,11 +421,16 @@ test('chat.completions.create', async (t) => {
assert.equal(res, expectedRes.streamData)

const events = agent.customEventAggregator.events.toArray()
assert.equal(events.length, 0, 'should not llm events when streaming is disabled')
const metrics = agent.metrics.getOrCreateMetric(TRACKING_METRIC)
assert.equal(metrics.callCount > 0, true)
const attributes = tx.trace.attributes.get(DESTINATIONS.TRANS_EVENT)
assert.equal(attributes.llm, true)
assert.equal(events.length, 0, 'should not create llm events when streaming is disabled')

// Should still create the OPENAI.COMPLETION segment since ai_monitoring is enabled
assertSegments(
tx.trace,
tx.trace.root,
[OPENAI.COMPLETION, [`External/${host}:${port}/chat/completions`]],
{ exact: false }
)

const streamingDisabled = agent.metrics.getOrCreateMetric(
'Supportability/Nodejs/ML/Streaming/Disabled'
)
Expand Down Expand Up @@ -478,6 +483,25 @@ test('chat.completions.create', async (t) => {
assert.equal(events.length, 0, 'should not create llm events')
})

await t.test('should not create segment or llm events when ai_monitoring.enabled is false', (t, end) => {
  // Turn off AI monitoring; the instrumentation should neither create a
  // completion segment nor record any Llm events for this call.
  const agent = t.nr.agent
  const client = t.nr.client
  agent.config.ai_monitoring.enabled = false

  helper.runInTransaction(agent, async (tx) => {
    await client.chat.completions.create({
      messages: [{ role: 'user', content: 'You are a mathematician.' }]
    })

    // Custom-event aggregator must stay empty.
    const recordedEvents = agent.customEventAggregator.events.toArray()
    assert.equal(recordedEvents.length, 0, 'should not create llm events when ai_monitoring is disabled')

    // The transaction root must have no children (no OpenAI segment).
    const rootChildren = tx.trace.segments.root.children
    assert.equal(rootChildren.length, 0, 'should not create OpenAI completion segment')

    tx.end()
    end()
  })
})

await t.test('auth errors should be tracked', async (t) => {
const { client, agent } = t.nr
const plan = tspl(t, { plan: 13 })
Expand Down
20 changes: 20 additions & 0 deletions test/versioned/openai/embeddings.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -188,3 +188,23 @@ test('should add llm attribute to transaction', (t, end) => {
end()
})
})

test('should not create segment or llm events when ai_monitoring.enabled is false', (t, end) => {
  const { client, agent } = t.nr
  // Disable AI monitoring; the embeddings subscriber should skip both
  // segment creation and Llm event recording.
  agent.config.ai_monitoring.enabled = false
  helper.runInTransaction(agent, async (tx) => {
    await client.embeddings.create({
      input: 'This is an embedding test.',
      model: 'text-embedding-ada-002'
    })

    // No Llm events should have been aggregated.
    const events = agent.customEventAggregator.events.toArray()
    assert.equal(events.length, 0, 'should not create llm events when ai_monitoring is disabled')

    // No child segment under the transaction root.
    // Fixed copy-paste: this is the embeddings test, so the message refers
    // to the embedding segment, not the completion segment.
    const children = tx.trace.segments.root.children
    assert.equal(children.length, 0, 'should not create OpenAI embedding segment')

    tx.end()
    end()
  })
})
Loading