Commit e0042af

Authored Dec 3, 2024
Python: graduate filters, add exception during addition and some cleanup (#9856)
### Motivation and Context

This PR graduates the filters. It includes some updates to the docstrings and adds a specific exception for errors raised while adding or removing filters.

Closes #9838
Fixes #9641

### Contribution Checklist

- [x] The code builds clean without any errors or warnings
- [x] The PR follows the [SK Contribution Guidelines](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md) and the [pre-submission formatting script](https://github.com/microsoft/semantic-kernel/blob/main/CONTRIBUTING.md#development-scripts) raises no violations
- [x] All unit tests pass, and I have added new tests where possible
- [x] I didn't break anyone 😄
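
The graduation is most visible in the import paths. A minimal before/after sketch, using only names that appear in the diffs below:

```python
# Before this PR: deep, experimental module paths.
# from semantic_kernel.filters.filter_types import FilterTypes
# from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext

# After this PR: graduated, top-level exports.
from semantic_kernel.exceptions import FilterManagementException
from semantic_kernel.filters import (
    AutoFunctionInvocationContext,
    FilterTypes,
    FunctionInvocationContext,
    PromptRenderContext,
)
```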

24 files changed: +179 −90 lines
 

python/pyproject.toml (+1)

@@ -146,6 +146,7 @@ environments = [
 ]

 [tool.pytest.ini_options]
+testpaths = 'tests'
 addopts = "-ra -q -r fEX"
 asyncio_default_fixture_loop_scope = "function"
 filterwarnings = [

python/samples/concepts/filtering/auto_function_invoke_filters.py (+4 −11)

@@ -4,19 +4,12 @@
 import os

 from semantic_kernel import Kernel
-from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
+from semantic_kernel.connectors.ai import FunctionChoiceBehavior
 from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion, OpenAIChatPromptExecutionSettings
-from semantic_kernel.contents import ChatHistory
-from semantic_kernel.contents.chat_message_content import ChatMessageContent
-from semantic_kernel.contents.function_call_content import FunctionCallContent
-from semantic_kernel.contents.function_result_content import FunctionResultContent
+from semantic_kernel.contents import ChatHistory, ChatMessageContent, FunctionCallContent, FunctionResultContent
 from semantic_kernel.core_plugins import MathPlugin, TimePlugin
-from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
-    AutoFunctionInvocationContext,
-)
-from semantic_kernel.filters.filter_types import FilterTypes
-from semantic_kernel.functions import KernelArguments
-from semantic_kernel.functions.function_result import FunctionResult
+from semantic_kernel.filters import AutoFunctionInvocationContext, FilterTypes
+from semantic_kernel.functions import FunctionResult, KernelArguments

 system_message = """
 You are a chat bot. Your name is Mosscap and

python/samples/concepts/filtering/function_invocation_filters.py (+5 −6)

@@ -6,12 +6,11 @@
 from collections.abc import Callable, Coroutine
 from typing import Any

-from semantic_kernel.connectors.ai.open_ai.services.azure_chat_completion import AzureChatCompletion
-from semantic_kernel.contents.chat_history import ChatHistory
-from semantic_kernel.exceptions.kernel_exceptions import OperationCancelledException
-from semantic_kernel.filters.filter_types import FilterTypes
-from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext
-from semantic_kernel.kernel import Kernel
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.open_ai import AzureChatCompletion
+from semantic_kernel.contents import ChatHistory
+from semantic_kernel.exceptions import OperationCancelledException
+from semantic_kernel.filters import FilterTypes, FunctionInvocationContext

 logger = logging.getLogger(__name__)

python/samples/concepts/filtering/function_invocation_filters_stream.py (+14 −9)

@@ -3,15 +3,15 @@
 import asyncio
 import logging
 import os
+from collections.abc import Callable, Coroutine
 from functools import reduce
+from typing import Any

-from semantic_kernel.connectors.ai.open_ai.services.open_ai_chat_completion import OpenAIChatCompletion
-from semantic_kernel.contents import AuthorRole
-from semantic_kernel.contents.chat_history import ChatHistory
-from semantic_kernel.contents.streaming_chat_message_content import StreamingChatMessageContent
-from semantic_kernel.filters.filter_types import FilterTypes
-from semantic_kernel.functions.function_result import FunctionResult
-from semantic_kernel.kernel import Kernel
+from semantic_kernel import Kernel
+from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
+from semantic_kernel.contents import AuthorRole, ChatHistory, StreamingChatMessageContent
+from semantic_kernel.filters import FilterTypes, FunctionInvocationContext
+from semantic_kernel.functions import FunctionResult

 logger = logging.getLogger(__name__)

@@ -32,15 +32,20 @@
 # in the specific case of a filter for streaming functions, you need to override the generator
 # that is present in the function_result.value as seen below.
 @kernel.filter(FilterTypes.FUNCTION_INVOCATION)
-async def streaming_exception_handling(context, next):
+async def streaming_exception_handling(
+    context: FunctionInvocationContext,
+    next: Callable[[FunctionInvocationContext], Coroutine[Any, Any, None]],
+):
     await next(context)

     async def override_stream(stream):
         try:
             async for partial in stream:
                 yield partial
         except Exception as e:
-            yield [StreamingChatMessageContent(role=AuthorRole.ASSISTANT, content=f"Exception caught: {e}")]
+            yield [
+                StreamingChatMessageContent(role=AuthorRole.ASSISTANT, content=f"Exception caught: {e}", choice_index=0)
+            ]

     stream = context.result.value
     context.result = FunctionResult(function=context.result.function, value=override_stream(stream))
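
A hypothetical driver for this sample (the `chat` plugin/function names are illustrative, not from the diff; `kernel` is the instance configured above): streaming the function through the kernel exercises the filter's overridden generator, so a mid-stream failure surfaces as a final "Exception caught" chunk instead of a raised error.

```python
# Hypothetical usage sketch: stream so the filter can wrap the generator.
async def main() -> None:
    async for chunks in kernel.invoke_stream(plugin_name="chat", function_name="chat"):
        for chunk in chunks:
            print(chunk.content, end="")

# asyncio.run(main())
```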
python/semantic_kernel/connectors/ai/__init__.py (+1 −1)

@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft. All rights reserved.

+from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
 from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings

-__all__ = ["PromptExecutionSettings"]
+__all__ = ["FunctionChoiceBehavior", "PromptExecutionSettings"]

python/semantic_kernel/contents/chat_message_content.py (+1 −1)

@@ -84,7 +84,7 @@ class ChatMessageContent(KernelContent):
     tag: ClassVar[str] = CHAT_MESSAGE_CONTENT_TAG
     role: AuthorRole
     name: str | None = None
-    items: list[Annotated[ITEM_TYPES, Field(..., discriminator=DISCRIMINATOR_FIELD)]] = Field(default_factory=list)
+    items: list[Annotated[ITEM_TYPES, Field(discriminator=DISCRIMINATOR_FIELD)]] = Field(default_factory=list)
     encoding: str | None = None
     finish_reason: FinishReason | None = None

python/semantic_kernel/exceptions/__init__.py (+1)

@@ -2,6 +2,7 @@

 from semantic_kernel.exceptions.agent_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.content_exceptions import *  # noqa: F403
+from semantic_kernel.exceptions.filter_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.function_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.kernel_exceptions import *  # noqa: F403
 from semantic_kernel.exceptions.memory_connector_exceptions import *  # noqa: F403

python/semantic_kernel/exceptions/filter_exceptions.py (new file, +20)

@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft. All rights reserved.
+from semantic_kernel.exceptions.kernel_exceptions import KernelException
+
+
+class FilterException(KernelException):
+    """Base class for all filter exceptions."""
+
+    pass
+
+
+class FilterManagementException(FilterException):
+    """An error occurred while adding or removing the filter to/from the kernel."""
+
+    pass
+
+
+__all__ = [
+    "FilterException",
+    "FilterManagementException",
+]
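
The new exception is re-exported via `semantic_kernel.exceptions` (see the `__init__` change above), so callers can catch registration failures explicitly. A minimal sketch, mirroring the unit test updated further below:

```python
from semantic_kernel import Kernel
from semantic_kernel.exceptions import FilterManagementException

kernel = Kernel()

async def noop_filter(context, next):
    await next(context)

try:
    kernel.add_filter("not_a_filter_type", noop_filter)  # invalid filter type string
except FilterManagementException as exc:
    print(f"Could not add filter: {exc}")
```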

python/semantic_kernel/filters/__init__.py (new file, +15)

@@ -0,0 +1,15 @@
+# Copyright (c) Microsoft. All rights reserved.
+
+from semantic_kernel.filters.auto_function_invocation.auto_function_invocation_context import (
+    AutoFunctionInvocationContext,
+)
+from semantic_kernel.filters.filter_types import FilterTypes
+from semantic_kernel.filters.functions.function_invocation_context import FunctionInvocationContext
+from semantic_kernel.filters.prompts.prompt_render_context import PromptRenderContext
+
+__all__ = [
+    "AutoFunctionInvocationContext",
+    "FilterTypes",
+    "FunctionInvocationContext",
+    "PromptRenderContext",
+]

python/semantic_kernel/filters/auto_function_invocation/auto_function_invocation_context.py (+21 −1)

@@ -10,7 +10,27 @@


 class AutoFunctionInvocationContext(FilterContextBase):
-    """Class for auto function invocation context."""
+    """Class for auto function invocation context.
+
+    This is the context supplied to the auto function invocation filters.
+
+    A common use case is to alter the function_result, for instance filling it with a pre-computed
+    value in order to skip a step, for instance when doing caching.
+
+    Another option is to terminate; this can be done by setting terminate to True.
+
+    Attributes:
+        function: The function invoked.
+        kernel: The kernel used.
+        arguments: The arguments used to call the function.
+        chat_history: The chat history or None.
+        function_result: The function result or None.
+        request_sequence_index: The request sequence index.
+        function_sequence_index: The function sequence index.
+        function_count: The function count.
+        terminate: The flag to terminate.
+
+    """

     chat_history: "ChatHistory | None" = None
     function_result: "FunctionResult | None" = None
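
A sketch of the caching and termination pattern the new docstring describes. The in-memory cache dict and the key choice are illustrative, not part of this PR:

```python
from semantic_kernel import Kernel
from semantic_kernel.filters import AutoFunctionInvocationContext, FilterTypes
from semantic_kernel.functions import FunctionResult

kernel = Kernel()
cache: dict[str, FunctionResult] = {}  # illustrative in-memory cache

@kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
async def cached_auto_invoke(context: AutoFunctionInvocationContext, next):
    key = context.function.fully_qualified_name
    if key in cache:
        # Pre-computed value: skip the real call and stop the invocation loop.
        context.function_result = cache[key]
        context.terminate = True
        return
    await next(context)
    if context.function_result is not None:
        cache[key] = context.function_result
```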

python/semantic_kernel/filters/filter_context_base.py (−2)

@@ -3,15 +3,13 @@
 from typing import TYPE_CHECKING

 from semantic_kernel.kernel_pydantic import KernelBaseModel
-from semantic_kernel.utils.experimental_decorator import experimental_class

 if TYPE_CHECKING:
     from semantic_kernel.functions.kernel_arguments import KernelArguments
     from semantic_kernel.functions.kernel_function import KernelFunction
     from semantic_kernel.kernel import Kernel


-@experimental_class
 class FilterContextBase(KernelBaseModel):
     """Base class for Kernel Filter Contexts."""

python/semantic_kernel/filters/filter_types.py (−3)

@@ -2,10 +2,7 @@

 from enum import Enum

-from semantic_kernel.utils.experimental_decorator import experimental_class

-
-@experimental_class
 class FilterTypes(str, Enum):
     """Enum for the filter types."""

python/semantic_kernel/filters/functions/function_invocation_context.py (+13 −1)

@@ -9,6 +9,18 @@


 class FunctionInvocationContext(FilterContextBase):
-    """Class for function invocation context."""
+    """Class for function invocation context.
+
+    This filter can be used to monitor which functions are called, to log what function
+    was called with which parameters and what output, and finally for caching by
+    setting the result value.
+
+    Attributes:
+        function: The function invoked.
+        kernel: The kernel used.
+        arguments: The arguments used to call the function.
+        result: The result of the function, or None.
+
+    """

     result: "FunctionResult | None" = None
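
The monitoring use case from the docstring, sketched as a filter (logger configuration omitted; a fresh `Kernel` stands in for whatever instance an application already has):

```python
import logging

from semantic_kernel import Kernel
from semantic_kernel.filters import FilterTypes, FunctionInvocationContext

logger = logging.getLogger(__name__)
kernel = Kernel()

@kernel.filter(FilterTypes.FUNCTION_INVOCATION)
async def monitor(context: FunctionInvocationContext, next):
    # Log the call, its arguments, and its output around the actual invocation.
    logger.info("calling %s with %s", context.function.name, dict(context.arguments or {}))
    await next(context)
    logger.info("%s returned %s", context.function.name, context.result)
```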

python/semantic_kernel/filters/kernel_filters_extension.py (+12 −9)

@@ -7,10 +7,10 @@

 from pydantic import Field

+from semantic_kernel.exceptions.filter_exceptions import FilterManagementException
 from semantic_kernel.filters.filter_context_base import FilterContextBase
 from semantic_kernel.filters.filter_types import FilterTypes
 from semantic_kernel.kernel_pydantic import KernelBaseModel
-from semantic_kernel.utils.experimental_decorator import experimental_function

 FILTER_CONTEXT_TYPE = TypeVar("FILTER_CONTEXT_TYPE", bound=FilterContextBase)
 CALLABLE_FILTER_TYPE = Callable[[FILTER_CONTEXT_TYPE, Callable[[FILTER_CONTEXT_TYPE], None]], None]

@@ -32,7 +32,6 @@ class KernelFilterExtension(KernelBaseModel, ABC):
     prompt_rendering_filters: list[tuple[int, CALLABLE_FILTER_TYPE]] = Field(default_factory=list)
     auto_function_invocation_filters: list[tuple[int, CALLABLE_FILTER_TYPE]] = Field(default_factory=list)

-    @experimental_function
     def add_filter(self, filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes, filter: CALLABLE_FILTER_TYPE) -> None:
         """Add a filter to the Kernel.

@@ -45,12 +44,17 @@ def add_filter(self, filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes, filter:
             filter_type (str): The type of the filter to add (function_invocation, prompt_rendering)
             filter (object): The filter to add

+        Raises:
+            FilterManagementException: If an error occurs while adding the filter to the kernel
+
         """
-        if not isinstance(filter_type, FilterTypes):
-            filter_type = FilterTypes(filter_type)
-        getattr(self, FILTER_MAPPING[filter_type.value]).insert(0, (id(filter), filter))
+        try:
+            if not isinstance(filter_type, FilterTypes):
+                filter_type = FilterTypes(filter_type)
+            getattr(self, FILTER_MAPPING[filter_type.value]).insert(0, (id(filter), filter))
+        except Exception as ecx:
+            raise FilterManagementException(f"Error adding filter {filter} to {filter_type}") from ecx

-    @experimental_function
     def filter(
         self, filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes
     ) -> Callable[[CALLABLE_FILTER_TYPE], CALLABLE_FILTER_TYPE]:

@@ -64,7 +68,6 @@ def decorator(

         return decorator

-    @experimental_function
     def remove_filter(
         self,
         filter_type: ALLOWED_FILTERS_LITERAL | FilterTypes | None = None,

@@ -83,10 +86,10 @@
         if filter_type and not isinstance(filter_type, FilterTypes):
             filter_type = FilterTypes(filter_type)
         if filter_id is None and position is None:
-            raise ValueError("Either filter_id or position should be provided.")
+            raise FilterManagementException("Either filter_id or position should be provided.")
         if position is not None:
             if filter_type is None:
-                raise ValueError("Please specify the type of filter when using position.")
+                raise FilterManagementException("Please specify the type of filter when using position.")
             getattr(self, FILTER_MAPPING[filter_type]).pop(position)
             return
         if filter_type:
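
With this change, both failure paths of `remove_filter` raise the new exception. A sketch of the semantics, per the updated unit tests further below:

```python
from semantic_kernel import Kernel
from semantic_kernel.exceptions import FilterManagementException
from semantic_kernel.filters import FilterTypes

kernel = Kernel()

async def my_filter(context, next):
    await next(context)

kernel.add_filter(FilterTypes.FUNCTION_INVOCATION, my_filter)

try:
    kernel.remove_filter(position=0)  # position without a filter_type now raises
except FilterManagementException:
    # Supplying the filter type as well succeeds.
    kernel.remove_filter(filter_type=FilterTypes.FUNCTION_INVOCATION, position=0)
```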

python/semantic_kernel/filters/prompts/prompt_render_context.py (+13 −1)

@@ -9,7 +9,19 @@


 class PromptRenderContext(FilterContextBase):
-    """Context for prompt rendering filters."""
+    """Context for prompt rendering filters.
+
+    When prompt rendering is expensive (for instance, when expensive functions are called),
+    this filter can be used to set the rendered_prompt directly and return early.
+
+    Attributes:
+        function: The function invoked.
+        kernel: The kernel used.
+        arguments: The arguments used to call the function.
+        rendered_prompt: The result of the prompt rendering.
+        function_result: The result of the function that used the prompt.
+
+    """

     rendered_prompt: str | None = None
     function_result: "FunctionResult | None" = None
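
A sketch of the short-circuit pattern this docstring describes: supply `rendered_prompt` up front and return without running the expensive render. The condition, function name, and prompt text are illustrative:

```python
from semantic_kernel import Kernel
from semantic_kernel.filters import FilterTypes, PromptRenderContext

kernel = Kernel()

@kernel.filter(FilterTypes.PROMPT_RENDERING)
async def prerendered(context: PromptRenderContext, next):
    if context.function.name == "expensive_prompt":  # hypothetical function name
        # Short-circuit: supply the prompt and skip the expensive render step.
        context.rendered_prompt = "You are a helpful assistant."
        return
    await next(context)
```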

python/tests/integration/audio_to_text/audio_to_text_test_base.py (+4 −2)

@@ -11,7 +11,7 @@
 # There is only the whisper model available on Azure OpenAI for audio to text. And that model is
 # only available in the North Switzerland region. Therefore, the endpoint is different than the one
 # we use for other services.
-is_service_setup_for_testing(["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"])
+azure_setup = is_service_setup_for_testing(["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"], raise_if_not_set=False)


 class AudioToTextTestBase:

@@ -22,5 +22,7 @@ def services(self) -> dict[str, AudioToTextClientBase]:
         """Return audio-to-text services."""
         return {
             "openai": OpenAIAudioToText(),
-            "azure_openai": AzureAudioToText(endpoint=os.environ["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"]),
+            "azure_openai": AzureAudioToText(endpoint=os.environ["AZURE_OPENAI_AUDIO_TO_TEXT_ENDPOINT"])
+            if azure_setup
+            else None,
         }

python/tests/integration/audio_to_text/test_audio_to_text.py (+2)

@@ -49,6 +49,8 @@ async def test_audio_to_text(
     """

     service = services[service_id]
+    if not service:
+        pytest.xfail("Azure Audio to Text not setup.")
     result = await service.get_text_content(audio_content)

     for word in expected_text:

python/tests/integration/completions/chat_completion_test_base.py (+15 −11)

@@ -56,17 +56,21 @@
 # There is no single model in Ollama that supports both image and tool call in chat completion
 # We are splitting the Ollama test into three services: chat, image, and tool call. The chat model
 # can be any model that supports chat completion. Also, Ollama is only available on Linux runners in our pipeline.
-ollama_setup: bool = is_service_setup_for_testing(["OLLAMA_CHAT_MODEL_ID"]) and is_test_running_on_supported_platforms([
-    "Linux"
-])
-ollama_image_setup: bool = is_service_setup_for_testing([
-    "OLLAMA_CHAT_MODEL_ID_IMAGE"
-]) and is_test_running_on_supported_platforms(["Linux"])
-ollama_tool_call_setup: bool = is_service_setup_for_testing([
-    "OLLAMA_CHAT_MODEL_ID_TOOL_CALL"
-]) and is_test_running_on_supported_platforms(["Linux"])
-google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY", "GOOGLE_AI_GEMINI_MODEL_ID"])
-vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID", "VERTEX_AI_GEMINI_MODEL_ID"])
+ollama_setup: bool = is_service_setup_for_testing(
+    ["OLLAMA_CHAT_MODEL_ID"], raise_if_not_set=False
+) and is_test_running_on_supported_platforms(["Linux"])
+ollama_image_setup: bool = is_service_setup_for_testing(
+    ["OLLAMA_CHAT_MODEL_ID_IMAGE"], raise_if_not_set=False
+) and is_test_running_on_supported_platforms(["Linux"])
+ollama_tool_call_setup: bool = is_service_setup_for_testing(
+    ["OLLAMA_CHAT_MODEL_ID_TOOL_CALL"], raise_if_not_set=False
+) and is_test_running_on_supported_platforms(["Linux"])
+google_ai_setup: bool = is_service_setup_for_testing(
+    ["GOOGLE_AI_API_KEY", "GOOGLE_AI_GEMINI_MODEL_ID"], raise_if_not_set=False
+)
+vertex_ai_setup: bool = is_service_setup_for_testing(
+    ["VERTEX_AI_PROJECT_ID", "VERTEX_AI_GEMINI_MODEL_ID"], raise_if_not_set=False
+)
 onnx_setup: bool = is_service_setup_for_testing(
     ["ONNX_GEN_AI_CHAT_MODEL_FOLDER"], raise_if_not_set=False
 )  # Tests are optional for ONNX

python/tests/integration/completions/test_text_completion.py (+12 −13)

@@ -5,9 +5,15 @@
 from functools import partial, reduce
 from typing import Any

+if sys.version_info >= (3, 12):
+    from typing import override  # pragma: no cover
+else:
+    from typing_extensions import override  # pragma: no cover
+
 import pytest
 from openai import AsyncAzureOpenAI

+from semantic_kernel import Kernel
 from semantic_kernel.connectors.ai.bedrock import BedrockTextCompletion, BedrockTextPromptExecutionSettings
 from semantic_kernel.connectors.ai.google.google_ai import GoogleAITextCompletion, GoogleAITextPromptExecutionSettings
 from semantic_kernel.connectors.ai.google.vertex_ai import VertexAITextCompletion, VertexAITextPromptExecutionSettings

@@ -19,26 +25,19 @@
     OpenAITextCompletion,
     OpenAITextPromptExecutionSettings,
 )
+from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
 from semantic_kernel.connectors.ai.text_completion_client_base import TextCompletionClientBase
 from semantic_kernel.contents.chat_message_content import ChatMessageContent
 from semantic_kernel.contents.text_content import TextContent
 from semantic_kernel.utils.authentication.entra_id_authentication import get_entra_auth_token
-
-if sys.version_info >= (3, 12):
-    from typing import override  # pragma: no cover
-else:
-    from typing_extensions import override  # pragma: no cover
-
-from semantic_kernel import Kernel
-from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
 from tests.integration.completions.completion_test_base import CompletionTestBase, ServiceType
 from tests.utils import is_service_setup_for_testing, is_test_running_on_supported_platforms, retry

-ollama_setup: bool = is_service_setup_for_testing(["OLLAMA_TEXT_MODEL_ID"]) and is_test_running_on_supported_platforms([
-    "Linux"
-])
-google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY"])
-vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID"])
+ollama_setup: bool = is_service_setup_for_testing(
+    ["OLLAMA_TEXT_MODEL_ID"], raise_if_not_set=False
+) and is_test_running_on_supported_platforms(["Linux"])
+google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY"], raise_if_not_set=False)
+vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID"], raise_if_not_set=False)
 onnx_setup: bool = is_service_setup_for_testing(
     ["ONNX_GEN_AI_TEXT_MODEL_FOLDER"], raise_if_not_set=False
 )  # Tests are optional for ONNX

python/tests/integration/embeddings/test_embedding_service_base.py (+9 −5)

@@ -41,11 +41,15 @@
 mistral_ai_setup: bool = is_service_setup_for_testing(
     ["MISTRALAI_API_KEY", "MISTRALAI_EMBEDDING_MODEL_ID"], raise_if_not_set=False
 )  # We don't have a MistralAI deployment
-google_ai_setup: bool = is_service_setup_for_testing(["GOOGLE_AI_API_KEY", "GOOGLE_AI_EMBEDDING_MODEL_ID"])
-vertex_ai_setup: bool = is_service_setup_for_testing(["VERTEX_AI_PROJECT_ID", "VERTEX_AI_EMBEDDING_MODEL_ID"])
-ollama_setup: bool = is_service_setup_for_testing([
-    "OLLAMA_EMBEDDING_MODEL_ID"
-]) and is_test_running_on_supported_platforms(["Linux"])
+google_ai_setup: bool = is_service_setup_for_testing(
+    ["GOOGLE_AI_API_KEY", "GOOGLE_AI_EMBEDDING_MODEL_ID"], raise_if_not_set=False
+)
+vertex_ai_setup: bool = is_service_setup_for_testing(
+    ["VERTEX_AI_PROJECT_ID", "VERTEX_AI_EMBEDDING_MODEL_ID"], raise_if_not_set=False
+)
+ollama_setup: bool = is_service_setup_for_testing(
+    ["OLLAMA_EMBEDDING_MODEL_ID"], raise_if_not_set=False
+) and is_test_running_on_supported_platforms(["Linux"])


 class EmbeddingServiceTestBase:

python/tests/unit/agents/test_termination_strategy.py (+4 −4)

@@ -29,7 +29,7 @@ async def create_channel(self) -> AgentChannel:
         return AsyncMock(spec=AgentChannel)


-class TestTerminationStrategy(TerminationStrategy):
+class TerminationStrategyTest(TerminationStrategy):
     """A test implementation of TerminationStrategy for testing purposes."""

     async def should_agent_terminate(self, agent: "Agent", history: list[ChatMessageContent]) -> bool:

@@ -40,7 +40,7 @@ async def should_agent_terminate(self, agent: "Agent", history: list[ChatMessage
 @pytest.mark.asyncio
 async def test_should_terminate_with_matching_agent():
     agent = MockAgent(id="test-agent-id")
-    strategy = TestTerminationStrategy(agents=[agent])
+    strategy = TerminationStrategyTest(agents=[agent])

     # Assuming history is a list of ChatMessageContent; can be mocked or made minimal
     history = [MagicMock(spec=ChatMessageContent)]

@@ -53,7 +53,7 @@ async def test_should_terminate_with_matching_agent():
 async def test_should_terminate_with_non_matching_agent():
     agent = MockAgent(id="test-agent-id")
     non_matching_agent = MockAgent(id="non-matching-agent-id")
-    strategy = TestTerminationStrategy(agents=[non_matching_agent])
+    strategy = TerminationStrategyTest(agents=[non_matching_agent])

     # Assuming history is a list of ChatMessageContent; can be mocked or made minimal
     history = [MagicMock(spec=ChatMessageContent)]

@@ -65,7 +65,7 @@ async def test_should_terminate_with_non_matching_agent():
 @pytest.mark.asyncio
 async def test_should_terminate_no_agents_in_strategy():
     agent = MockAgent(id="test-agent-id")
-    strategy = TestTerminationStrategy()
+    strategy = TerminationStrategyTest()

     # Assuming history is a list of ChatMessageContent; can be mocked or made minimal
     history = [MagicMock(spec=ChatMessageContent)]

python/tests/unit/connectors/ai/open_ai/test_openai_request_settings.py (+4 −4)

@@ -21,11 +21,11 @@

 ############################################
 # Test classes for structured output
-class TestClass:
+class ClassTest:
     attribute: str


-class TestClassPydantic(KernelBaseModel):
+class ClassTestPydantic(KernelBaseModel):
     attribute: str


@@ -354,13 +354,13 @@ def test_openai_chat_prompt_execution_settings_with_json_structured_output():


 def test_openai_chat_prompt_execution_settings_with_nonpydantic_type_structured_output():
     settings = OpenAIChatPromptExecutionSettings()
-    settings.response_format = TestClass
+    settings.response_format = ClassTest
     assert isinstance(settings.response_format, type)


 def test_openai_chat_prompt_execution_settings_with_pydantic_type_structured_output():
     settings = OpenAIChatPromptExecutionSettings()
-    settings.response_format = TestClassPydantic
+    settings.response_format = ClassTestPydantic
     assert issubclass(settings.response_format, BaseModel)

python/tests/unit/kernel/test_kernel_filter_extension.py (+4 −3)

@@ -2,6 +2,7 @@
 from pytest import fixture, mark, raises

 from semantic_kernel import Kernel
+from semantic_kernel.exceptions.filter_exceptions import FilterManagementException


 @fixture

@@ -63,15 +64,15 @@ def test_remove_filter_without_type(self, kernel: Kernel, custom_filter, filter_


 def test_unknown_filter_type(kernel: Kernel, custom_filter):
-    with raises(ValueError):
+    with raises(FilterManagementException):
         kernel.add_filter("unknown", custom_filter)


 def test_remove_filter_fail(kernel: Kernel):
-    with raises(ValueError):
+    with raises(FilterManagementException):
         kernel.remove_filter()


 def test_remove_filter_fail_position(kernel: Kernel):
-    with raises(ValueError):
+    with raises(FilterManagementException):
         kernel.remove_filter(position=0)

python/tests/unit/schema/test_schema_builder.py (+3 −3)

@@ -35,7 +35,7 @@ class ModelWithUnionPrimitives:
     item: int | str


-class TestEnum(Enum):
+class EnumTest(Enum):
     OPTION_A = "OptionA"
     OPTION_B = "OptionB"
     OPTION_C = "OptionC"

@@ -370,7 +370,7 @@ def test_build_complex_type_list():


 def test_enum_schema():
-    schema = KernelJsonSchemaBuilder.build(TestEnum, "Test Enum Description")
+    schema = KernelJsonSchemaBuilder.build(EnumTest, "Test Enum Description")
     expected_schema = {
         "type": "string",
         "enum": ["OptionA", "OptionB", "OptionC"],

@@ -380,7 +380,7 @@ def test_enum_schema():


 def test_enum_schema_without_description():
-    schema = KernelJsonSchemaBuilder.build(TestEnum)
+    schema = KernelJsonSchemaBuilder.build(EnumTest)
     expected_schema = {"type": "string", "enum": ["OptionA", "OptionB", "OptionC"]}
     assert schema == expected_schema
