|
14 | 14 |
|
15 | 15 | from langchain_core.callbacks import CallbackManagerForLLMRun
|
16 | 16 | from langchain_core.messages import BaseMessage, AIMessageChunk
|
17 |
| -from langchain_core.outputs import ChatGenerationChunk |
| 17 | +from langchain_core.outputs import ChatGenerationChunk, ChatResult |
| 18 | +from langchain_core.runnables import RunnableConfig |
| 19 | +from langchain_core.language_models.chat_models import BaseChatModel |
18 | 20 |
|
19 | 21 | from sentry_sdk import start_transaction
|
20 |
| -from sentry_sdk.integrations.langchain import LangchainIntegration |
| 22 | +from sentry_sdk.integrations.langchain import ( |
| 23 | + LangchainIntegration, |
| 24 | + SentryLangchainCallback, |
| 25 | +) |
21 | 26 | from langchain.agents import tool, AgentExecutor, create_openai_tools_agent
|
22 | 27 | from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
23 | 28 |
|
@@ -342,3 +347,72 @@ def test_span_origin(sentry_init, capture_events):
|
342 | 347 | assert event["contexts"]["trace"]["origin"] == "manual"
|
343 | 348 | for span in event["spans"]:
|
344 | 349 | assert span["origin"] == "auto.ai.langchain"
|
| 350 | + |
| 351 | + |
def test_manual_callback_no_duplication(sentry_init):
    """
    Test that when a user manually provides a SentryLangchainCallback,
    the integration doesn't create a duplicate callback.
    """

    # IDs of every SentryLangchainCallback observed during the model run.
    seen_callback_ids = set()

    class RecordingChatModel(BaseChatModel):
        """Minimal chat model that records the Sentry callbacks it receives."""

        def _generate(
            self,
            messages,
            stop=None,
            run_manager=None,
            **kwargs,
        ):
            # Scan both the direct and the inheritable handler lists for
            # SentryLangchainCallback instances and remember their identities.
            if run_manager:
                combined = list(run_manager.handlers) + list(
                    run_manager.inheritable_handlers
                )
                for handler in combined:
                    if isinstance(handler, SentryLangchainCallback):
                        seen_callback_ids.add(id(handler))

            return ChatResult(
                generations=[
                    ChatGenerationChunk(message=AIMessageChunk(content="Hello!"))
                ],
                llm_output={},
            )

        @property
        def _llm_type(self):
            return "test_model"

        @property
        def _identifying_params(self):
            return {}

    sentry_init(integrations=[LangchainIntegration()])

    # A user-supplied Sentry callback, passed explicitly through the config.
    user_callback = SentryLangchainCallback(
        max_span_map_size=100, include_prompts=False
    )
    run_config = RunnableConfig(callbacks=[user_callback])

    # Run the model with the manual callback attached.
    model = RecordingChatModel()
    model.invoke("Hello", run_config)

    # The integration must not have added a second callback instance.
    assert len(seen_callback_ids) == 1, (
        f"Expected exactly 1 SentryLangchainCallback instance, "
        f"but found {len(seen_callback_ids)}. "
        f"This indicates callback duplication occurred."
    )

    # And the single active instance is exactly the one the user provided.
    assert id(user_callback) in seen_callback_ids
0 commit comments