diff --git a/pyproject.toml b/pyproject.toml
index c162a0293..16fd6a085 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "uipath"
-version = "2.4.14"
+version = "2.4.15"
 description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
 readme = { file = "README.md", content-type = "text/markdown" }
 requires-python = ">=3.11"
diff --git a/samples/calculator/evaluations/eval-sets/legacy.json b/samples/calculator/evaluations/eval-sets/legacy.json
index 1e3234fae..664070235 100644
--- a/samples/calculator/evaluations/eval-sets/legacy.json
+++ b/samples/calculator/evaluations/eval-sets/legacy.json
@@ -5,6 +5,7 @@
   "batchSize": 10,
   "evaluatorRefs": [
     "equality",
+    "equality-with-target-key",
     "llm-as-a-judge",
     "json-similarity",
     "trajectory"
diff --git a/samples/calculator/evaluations/evaluators/legacy-equality-with-target-key.json b/samples/calculator/evaluations/evaluators/legacy-equality-with-target-key.json
new file mode 100644
index 000000000..5b169ee38
--- /dev/null
+++ b/samples/calculator/evaluations/evaluators/legacy-equality-with-target-key.json
@@ -0,0 +1,11 @@
+{
+  "fileName": "equality-with-target-key.json",
+  "id": "equality-with-target-key",
+  "name": "Legacy Equality Evaluator With Target Key",
+  "description": "An evaluator that judges the agent based on expected output under \"result\" key.",
+  "category": 0,
+  "type": 1,
+  "targetOutputKey": "result",
+  "createdAt": "2025-06-26T17:45:39.651Z",
+  "updatedAt": "2025-06-26T17:45:39.651Z"
+}
diff --git a/src/uipath/eval/evaluators/legacy_exact_match_evaluator.py b/src/uipath/eval/evaluators/legacy_exact_match_evaluator.py
index 5dc1dd149..b54c30fdf 100644
--- a/src/uipath/eval/evaluators/legacy_exact_match_evaluator.py
+++ b/src/uipath/eval/evaluators/legacy_exact_match_evaluator.py
@@ -40,7 +40,24 @@ async def evaluate(
         Returns:
             EvaluationResult: Boolean result indicating exact match (True/False)
         """
+        actual_output = agent_execution.agent_output
+        expected_output = evaluation_criteria.expected_output
+
+        if self.target_output_key and self.target_output_key != "*":
+            if isinstance(actual_output, dict) and isinstance(expected_output, dict):
+                if not (
+                    self.target_output_key in actual_output
+                    and self.target_output_key in expected_output
+                ):
+                    # Assuming that we should pass the test.
+                    expected_output = actual_output = {}
+                else:
+                    if self.target_output_key in actual_output:
+                        actual_output = actual_output[self.target_output_key]
+                    if self.target_output_key in expected_output:
+                        expected_output = expected_output[self.target_output_key]
+
         return BooleanEvaluationResult(
-            score=self._canonical_json(agent_execution.agent_output)
-            == self._canonical_json(evaluation_criteria.expected_output)
+            score=self._canonical_json(actual_output)
+            == self._canonical_json(expected_output)
         )
diff --git a/testcases/calculator-evals/run.sh b/testcases/calculator-evals/run.sh
index 45536d713..d9e46a39e 100755
--- a/testcases/calculator-evals/run.sh
+++ b/testcases/calculator-evals/run.sh
@@ -8,6 +8,7 @@ echo "Authenticating with UiPath..."
 uv run uipath auth --client-id="$CLIENT_ID" --client-secret="$CLIENT_SECRET" --base-url="$BASE_URL"
 
 echo "Running evaluations with custom evaluator..."
-uv run uipath eval main ../../samples/calculator/evaluations/eval-sets/default.json --no-report
+uv run uipath eval main ../../samples/calculator/evaluations/eval-sets/legacy.json --no-report --output-file legacy.json
+uv run uipath eval main ../../samples/calculator/evaluations/eval-sets/default.json --no-report --output-file default.json
 
 echo "Test completed successfully!"
diff --git a/testcases/calculator-evals/src/assert.py b/testcases/calculator-evals/src/assert.py
index 7e8f9f225..023a19bc1 100644
--- a/testcases/calculator-evals/src/assert.py
+++ b/testcases/calculator-evals/src/assert.py
@@ -12,8 +12,92 @@ def main() -> None:
     """Main assertion logic."""
 
     # Check if output file exists
-    output_file = "__uipath/output.json"
+    for output_file in ["default.json", "legacy.json"]:
+        assert os.path.isfile(output_file), (
+            f"Evaluation output file '{output_file}' not found"
+        )
+        print(f"✓ Found evaluation output file: {output_file}")
+
+        # Load evaluation results
+        with open(output_file, "r", encoding="utf-8") as f:
+            output_data = json.load(f)
+
+        print("✓ Loaded evaluation output")
+
+        # Extract output data
+        output = output_data
+
+        # Validate structure
+        assert "evaluationSetResults" in output, "Missing 'evaluationSetResults' in output"
+
+        evaluation_results = output["evaluationSetResults"]
+        assert len(evaluation_results) > 0, "No evaluation results found"
+
+        print(f"✓ Found {len(evaluation_results)} evaluation result(s)")
+
+        # Validate each evaluation result
+        passed_count = 0
+        failed_count = 0
+        skipped_count = 0
+        has_positive_scores = False
+
+        for eval_result in evaluation_results:
+            eval_name = eval_result.get("evaluationName", "Unknown")
+            print(f"\n→ Validating: {eval_name}")
+
+            try:
+                # Validate evaluation results are present
+                eval_run_results = eval_result.get("evaluationRunResults", [])
+                if len(eval_run_results) == 0:
+                    print(f"  ⊘ Skipping '{eval_name}' (no evaluation run results)")
+                    skipped_count += 1
+                    continue
+
+                # Check that evaluations have scores > 0
+                all_passed = True
+                min_score = 100
+                for eval_run in eval_run_results:
+                    evaluator_name = eval_run.get("evaluatorName", "Unknown")
+                    result = eval_run.get("result", {})
+                    score = result.get("score", 0)
+                    min_score = min(min_score, score)
+
+                    # Check if score is greater than 0
+                    if score > 0:
+                        has_positive_scores = True
+                        print(f"  ✓ {evaluator_name}: score={score:.1f}")
+                    else:
+                        print(f"  ✗ {evaluator_name}: score={score:.1f} (must be > 0)")
+                        all_passed = False
+
+                if all_passed and min_score > 0:
+                    print(
+                        f"  ✓ All evaluators passed for '{eval_name}' (min score: {min_score:.1f})"
+                    )
+                    passed_count += 1
+                else:
+                    print(f"  ✗ Some evaluators failed for '{eval_name}'")
+                    failed_count += 1
+
+            except Exception as e:
+                print(f"  ✗ Error validating '{eval_name}': {e}")
+                failed_count += 1
+        # Final summary
+        print(f"\n{'=' * 60}")
+        print("Summary:")
+        print(f"  Total evaluations: {passed_count + failed_count + skipped_count}")
+        print(f"  ✓ Passed: {passed_count}")
+        print(f"  ✗ Failed: {failed_count}")
+        print(f"  ⊘ Skipped: {skipped_count}")
+        print(f"{'=' * 60}")
+
+        assert failed_count == 0, "Some assertions failed"
+        assert has_positive_scores, "No evaluation scores greater than 0 were found"
+
+        print("\n✅ All assertions passed!")
+
+    output_file = "__uipath/output.json"
     assert os.path.isfile(output_file), (
         f"Evaluation output file '{output_file}' not found"
     )
 
@@ -30,79 +114,6 @@ def main() -> None:
     assert status == "successful", f"Evaluation run failed with status: {status}"
     print("✓ Evaluation run status: successful")
successful") - # Extract output data - output = output_data.get("output", {}) - - # Validate structure - assert "evaluationSetResults" in output, "Missing 'evaluationSetResults' in output" - - evaluation_results = output["evaluationSetResults"] - assert len(evaluation_results) > 0, "No evaluation results found" - - print(f"✓ Found {len(evaluation_results)} evaluation result(s)") - - # Validate each evaluation result - passed_count = 0 - failed_count = 0 - skipped_count = 0 - has_positive_scores = False - - for eval_result in evaluation_results: - eval_name = eval_result.get("evaluationName", "Unknown") - print(f"\n→ Validating: {eval_name}") - - try: - # Validate evaluation results are present - eval_run_results = eval_result.get("evaluationRunResults", []) - if len(eval_run_results) == 0: - print(f" ⊘ Skipping '{eval_name}' (no evaluation run results)") - skipped_count += 1 - continue - - # Check that evaluations have scores > 0 - all_passed = True - min_score = 100 - for eval_run in eval_run_results: - evaluator_name = eval_run.get("evaluatorName", "Unknown") - result = eval_run.get("result", {}) - score = result.get("score", 0) - min_score = min(min_score, score) - - # Check if score is greater than 0 - if score > 0: - has_positive_scores = True - print(f" ✓ {evaluator_name}: score={score:.1f}") - else: - print(f" ✗ {evaluator_name}: score={score:.1f} (must be > 0)") - all_passed = False - - if all_passed and min_score > 0: - print( - f" ✓ All evaluators passed for '{eval_name}' (min score: {min_score:.1f})" - ) - passed_count += 1 - else: - print(f" ✗ Some evaluators failed for '{eval_name}'") - failed_count += 1 - - except Exception as e: - print(f" ✗ Error validating '{eval_name}': {e}") - failed_count += 1 - - # Final summary - print(f"\n{'=' * 60}") - print("Summary:") - print(f" Total evaluations: {passed_count + failed_count + skipped_count}") - print(f" ✓ Passed: {passed_count}") - print(f" ✗ Failed: {failed_count}") - print(f" ⊘ Skipped: {skipped_count}") - print(f"{'=' * 60}") - - assert failed_count == 0, "Some assertions failed" - assert has_positive_scores, "No evaluation scores greater than 0 were found" - - print("\n✅ All assertions passed!") - if __name__ == "__main__": main() diff --git a/tests/cli/evaluators/test_legacy_exact_match_evaluator.py b/tests/cli/evaluators/test_legacy_exact_match_evaluator.py new file mode 100644 index 000000000..5419f4f0e --- /dev/null +++ b/tests/cli/evaluators/test_legacy_exact_match_evaluator.py @@ -0,0 +1,589 @@ +"""Tests for LegacyExactMatchEvaluator. + +Tests the exact match evaluation functionality including target_output_key support, +canonical JSON normalization, and number normalization. 
+""" + +from unittest.mock import patch + +import pytest + +from uipath._cli._evals._models._evaluator_base_params import EvaluatorBaseParams +from uipath.eval.evaluators import LegacyExactMatchEvaluator +from uipath.eval.evaluators.legacy_base_evaluator import LegacyEvaluationCriteria +from uipath.eval.models.models import ( + AgentExecution, + LegacyEvaluatorCategory, + LegacyEvaluatorType, +) + + +def _make_base_params(target_output_key: str = "*") -> EvaluatorBaseParams: + """Create base parameters for exact match evaluator.""" + return EvaluatorBaseParams( + id="exact_match", + category=LegacyEvaluatorCategory.Deterministic, + evaluator_type=LegacyEvaluatorType.Equals, + name="ExactMatch", + description="Evaluates exact match of outputs", + created_at="2025-01-01T00:00:00Z", + updated_at="2025-01-01T00:00:00Z", + target_output_key=target_output_key, + ) + + +@pytest.fixture +def evaluator(): + """Fixture to create evaluator.""" + with patch("uipath.platform.UiPath"): + return LegacyExactMatchEvaluator( + **_make_base_params().model_dump(), + config={}, + ) + + +@pytest.fixture +def evaluator_with_target_key(): + """Fixture to create evaluator with a specific target output key.""" + with patch("uipath.platform.UiPath"): + return LegacyExactMatchEvaluator( + **_make_base_params(target_output_key="result").model_dump(), + config={}, + ) + + +class TestLegacyExactMatchEvaluator: + """Test suite for LegacyExactMatchEvaluator.""" + + @pytest.mark.asyncio + async def test_exact_match_same_strings(self, evaluator) -> None: + """Test exact match with identical string outputs.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output="Hello World", + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output="Hello World", + expected_agent_behavior="", + ), + ) + + assert result.score is True + assert isinstance(result.score, bool) + + @pytest.mark.asyncio + async def test_exact_match_different_strings(self, evaluator) -> None: + """Test exact match with different string outputs.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output="Hello World", + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output="Goodbye World", + expected_agent_behavior="", + ), + ) + + assert result.score is False + + @pytest.mark.asyncio + async def test_exact_match_identical_dicts(self, evaluator) -> None: + """Test exact match with identical dictionaries.""" + output = {"name": "John", "age": 30} + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output=output, + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output=output, + expected_agent_behavior="", + ), + ) + + assert result.score is True + + @pytest.mark.asyncio + async def test_exact_match_different_dicts(self, evaluator) -> None: + """Test exact match with different dictionaries.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output={"name": "John", "age": 30}, + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output={"name": "Jane", "age": 25}, + expected_agent_behavior="", + ), + ) + + assert result.score is False + + @pytest.mark.asyncio + async def test_exact_match_dict_key_order_doesnt_matter(self, evaluator) -> None: + """Test that canonical 
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"a": 1, "b": 2},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"b": 2, "a": 1},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_number_normalization_int_to_float(
+        self, evaluator
+    ) -> None:
+        """Test that integers are normalized to floats for comparison."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"value": 42},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"value": 42.0},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_number_normalization_in_list(self, evaluator) -> None:
+        """Test number normalization in lists."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"values": [1, 2, 3]},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"values": [1.0, 2.0, 3.0]},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_booleans_preserved(self, evaluator) -> None:
+        """Test that booleans are not converted to numbers."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"active": True, "deleted": False},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"active": True, "deleted": False},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_nested_structures(self, evaluator) -> None:
+        """Test exact match with nested dictionaries and lists."""
+        output = {
+            "user": {
+                "name": "John",
+                "scores": [95, 87, 92],
+                "active": True,
+            },
+            "metadata": {"version": 1.0},
+        }
+
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output=output,
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output=output,
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_with_target_key_both_have_key(
+        self, evaluator_with_target_key
+    ) -> None:
+        """Test target_output_key extraction when both outputs have the key."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"result": {"status": "success"}, "other": "ignore"},
+        )
+
+        result = await evaluator_with_target_key.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"result": {"status": "success"}, "other": "different"},
+                expected_agent_behavior="",
+            ),
+        )
+
+        # Both outputs have the target key, so both get extracted and compared
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_with_target_key_missing_in_both(
+        self, evaluator_with_target_key
+    ) -> None:
+        """Test target_output_key when key is missing in both outputs."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"other": "data"},
+        )
+
+        result = await evaluator_with_target_key.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"other": "different"},
+                expected_agent_behavior="",
+            ),
+        )
+
+        # When key is missing in both, both are set to {}, so they match
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_with_target_key_missing_in_actual(
+        self, evaluator_with_target_key
+    ) -> None:
+        """Test target_output_key when key is missing in actual output."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"other": "data"},
+        )
+
+        result = await evaluator_with_target_key.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"result": {"status": "success"}},
+                expected_agent_behavior="",
+            ),
+        )
+
+        # When key is missing in actual, both are set to {}, so they match
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_with_target_key_missing_in_expected(
+        self, evaluator_with_target_key
+    ) -> None:
+        """Test target_output_key when key is missing in expected output."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"result": {"status": "success"}},
+        )
+
+        result = await evaluator_with_target_key.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"other": "data"},
+                expected_agent_behavior="",
+            ),
+        )
+
+        # When key is missing in expected, both are set to {}, so they match
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_with_wildcard_target_key(self, evaluator) -> None:
+        """Test that wildcard target_output_key compares full outputs."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"data": "value", "extra": "field"},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"data": "value", "extra": "field"},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_with_target_key_non_dict_inputs(
+        self, evaluator_with_target_key
+    ) -> None:
+        """Test target_output_key with non-dict inputs (should compare as-is)."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output="string_value",
+        )
+
+        result = await evaluator_with_target_key.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output="other_string",
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is False
+
+    @pytest.mark.asyncio
+    async def test_exact_match_null_values(self, evaluator) -> None:
+        """Test exact match with None values."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={"value": None},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"value": None},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_empty_dict(self, evaluator) -> None:
+        """Test exact match with empty dictionaries."""
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={},
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
+
+    @pytest.mark.asyncio
+    async def test_exact_match_empty_string(self, evaluator) -> None:
"""Test exact match with empty strings.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output="", + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output="", + expected_agent_behavior="", + ), + ) + + assert result.score is True + + @pytest.mark.asyncio + async def test_exact_match_unicode_characters(self, evaluator) -> None: + """Test exact match with unicode characters.""" + output = {"greeting": "你好世界", "emoji": "🎉"} + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output=output, + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output=output, + expected_agent_behavior="", + ), + ) + + assert result.score is True + + @pytest.mark.asyncio + async def test_exact_match_whitespace_matters(self, evaluator) -> None: + """Test that whitespace differences are detected.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output="Hello World", + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output="Hello World", + expected_agent_behavior="", + ), + ) + + assert result.score is False + + @pytest.mark.asyncio + async def test_exact_match_case_sensitivity(self, evaluator) -> None: + """Test that string comparison is case sensitive.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output="Hello", + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output="hello", + expected_agent_behavior="", + ), + ) + + assert result.score is False + + @pytest.mark.asyncio + async def test_exact_match_large_numbers(self, evaluator) -> None: + """Test exact match with large numbers.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output={"value": 999999999999999}, + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output={"value": 999999999999999.0}, + expected_agent_behavior="", + ), + ) + + assert result.score is True + + @pytest.mark.asyncio + async def test_exact_match_floating_point_precision(self, evaluator) -> None: + """Test exact match with floating point numbers.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output={"pi": 3.14159}, + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output={"pi": 3.14159}, + expected_agent_behavior="", + ), + ) + + assert result.score is True + + @pytest.mark.asyncio + async def test_exact_match_float_vs_int_zero(self, evaluator) -> None: + """Test that 0 and 0.0 are considered equal.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output={"value": 0}, + ) + + result = await evaluator.evaluate( + agent_execution, + evaluation_criteria=LegacyEvaluationCriteria( + expected_output={"value": 0.0}, + expected_agent_behavior="", + ), + ) + + assert result.score is True + + @pytest.mark.asyncio + async def test_exact_match_with_target_key_different_values( + self, evaluator_with_target_key + ) -> None: + """Test target_output_key with different values in target key.""" + agent_execution = AgentExecution( + agent_input={}, + agent_trace=[], + agent_output={"result": {"status": "success"}, "other": "ignore"}, + ) + + result = await 
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={"result": {"status": "failed"}, "other": "ignore"},
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is False
+
+    @pytest.mark.asyncio
+    async def test_canonical_json_normalization(self, evaluator) -> None:
+        """Test that canonical JSON normalization works correctly."""
+        # Create complex nested structure with mixed types
+        agent_execution = AgentExecution(
+            agent_input={},
+            agent_trace=[],
+            agent_output={
+                "z_key": 1,
+                "a_key": [3, 2, 1],
+                "m_key": {"nested": 42},
+            },
+        )
+
+        result = await evaluator.evaluate(
+            agent_execution,
+            evaluation_criteria=LegacyEvaluationCriteria(
+                expected_output={
+                    "a_key": [3, 2, 1],
+                    "m_key": {"nested": 42.0},
+                    "z_key": 1.0,
+                },
+                expected_agent_behavior="",
+            ),
+        )
+
+        assert result.score is True
diff --git a/uv.lock b/uv.lock
index 79dfaec13..b1bde89ac 100644
--- a/uv.lock
+++ b/uv.lock
@@ -2486,7 +2486,7 @@ wheels = [
 
 [[package]]
 name = "uipath"
-version = "2.4.14"
+version = "2.4.15"
 source = { editable = "." }
 dependencies = [
     { name = "applicationinsights" },
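Note on the new comparison semantics: the sketch below is a standalone illustration of the targetOutputKey handling added to legacy_exact_match_evaluator.py in this diff. The canonical_json helper here is an assumption (the SDK's _canonical_json implementation is not shown in the hunk); it only reproduces the key-order and int/float normalization that the new tests exercise, so treat it as a sketch rather than the SDK's actual code.

    import json
    from typing import Any


    def canonical_json(value: Any) -> str:
        # Assumed stand-in for the evaluator's _canonical_json: sort keys and
        # coerce ints to floats so 42 compares equal to 42.0, as the tests expect.
        def normalize(v: Any) -> Any:
            if isinstance(v, bool):
                return v  # keep booleans distinct from numbers
            if isinstance(v, int):
                return float(v)
            if isinstance(v, dict):
                return {k: normalize(x) for k, x in v.items()}
            if isinstance(v, list):
                return [normalize(x) for x in v]
            return v

        return json.dumps(normalize(value), sort_keys=True, ensure_ascii=False)


    def exact_match(actual: Any, expected: Any, target_output_key: str = "*") -> bool:
        # Mirrors the new hunk: when a non-wildcard key is configured and both
        # sides are dicts, compare only that key; if either side lacks the key,
        # the comparison is treated as a pass (both sides become {}).
        if target_output_key and target_output_key != "*":
            if isinstance(actual, dict) and isinstance(expected, dict):
                if not (target_output_key in actual and target_output_key in expected):
                    expected = actual = {}
                else:
                    actual = actual[target_output_key]
                    expected = expected[target_output_key]
        return canonical_json(actual) == canonical_json(expected)


    # Matches the behavior exercised by the new tests above.
    assert exact_match({"result": {"n": 42}}, {"result": {"n": 42.0}}, "result")
    assert exact_match({"other": "x"}, {"result": 1}, "result")  # missing key -> pass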