diff --git a/packages/uipath/pyproject.toml b/packages/uipath/pyproject.toml index 3302e528e..aba6ae877 100644 --- a/packages/uipath/pyproject.toml +++ b/packages/uipath/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "uipath" -version = "2.10.62" +version = "2.10.63" description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools." readme = { file = "README.md", content-type = "text/markdown" } requires-python = ">=3.11" diff --git a/packages/uipath/samples/runtime-simulations-agent/input.json b/packages/uipath/samples/runtime-simulations-agent/input.json new file mode 100644 index 000000000..9bfb2eef8 --- /dev/null +++ b/packages/uipath/samples/runtime-simulations-agent/input.json @@ -0,0 +1,4 @@ +{ + "code": "def add(a, b):\n return a+b\n\ndef divide(a,b):\n return a/b", + "language": "python" +} diff --git a/packages/uipath/samples/runtime-simulations-agent/main.py b/packages/uipath/samples/runtime-simulations-agent/main.py new file mode 100644 index 000000000..46440b459 --- /dev/null +++ b/packages/uipath/samples/runtime-simulations-agent/main.py @@ -0,0 +1,186 @@ +"""Coding agent that reviews code and suggests improvements. + +This sample demonstrates the --simulation flag: the three tool functions +(check_syntax, check_style, suggest_improvements) are decorated with @mockable, +so they can be intercepted by an LLM during a simulated run instead of +requiring a real linter or compiler to be installed. + +Run with real tools: + uipath run main.py:main -f input.json + +Run with simulation (no real tools needed): + uipath run main.py:main -f input.json --simulation "$(cat simulation.json)" +""" + +import logging + +from pydantic import BaseModel +from pydantic.dataclasses import dataclass + +from uipath.eval.mocks import ExampleCall, mockable +from uipath.tracing import traced + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Input / Output models +# --------------------------------------------------------------------------- + + +@dataclass +class CodeReviewInput: + code: str + language: str = "python" + + +class SyntaxResult(BaseModel): + valid: bool + errors: list[str] = [] + + +class StyleResult(BaseModel): + score: int # 0-100 + violations: list[str] = [] + + +class ImprovementResult(BaseModel): + suggestions: list[str] = [] + refactored_snippet: str = "" + + +class CodeReviewOutput(BaseModel): + syntax: SyntaxResult + style: StyleResult + improvements: ImprovementResult + summary: str + + +# --------------------------------------------------------------------------- +# Mockable tool functions +# --------------------------------------------------------------------------- + +CHECK_SYNTAX_EXAMPLES = [ + ExampleCall( + id="valid-python", + input='{"code": "def hello():\\n return 42", "language": "python"}', + output='{"valid": true, "errors": []}', + ), + ExampleCall( + id="syntax-error", + input='{"code": "def hello(\\n return 42", "language": "python"}', + output='{"valid": false, "errors": ["SyntaxError: unexpected EOF"]}', + ), +] + + +@traced(name="check_syntax", span_type="tool") +@mockable(example_calls=CHECK_SYNTAX_EXAMPLES) +async def check_syntax(code: str, language: str = "python") -> SyntaxResult: + """Check code for syntax errors using the language's parser. + + Args: + code: Source code to check. + language: Programming language (default: python). 
+ + Returns: + SyntaxResult with valid flag and list of error messages. + """ + if language != "python": + return SyntaxResult(valid=True, errors=[]) + + try: + compile(code, "<string>", "exec") + return SyntaxResult(valid=True, errors=[]) + except SyntaxError as exc: + return SyntaxResult(valid=False, errors=[str(exc)]) + + +CHECK_STYLE_EXAMPLES = [ + ExampleCall( + id="clean-code", + input='{"code": "def hello():\\n return 42\\n", "language": "python"}', + output='{"score": 95, "violations": []}', + ), + ExampleCall( + id="style-issues", + input='{"code": "def hello( ):\\n return 42", "language": "python"}', + output='{"score": 60, "violations": ["E211 whitespace before \'(\'", "W291 trailing whitespace"]}', + ), +] + + +@traced(name="check_style", span_type="tool") +@mockable(example_calls=CHECK_STYLE_EXAMPLES) +async def check_style(code: str, language: str = "python") -> StyleResult: + """Run style checks (e.g. PEP 8 for Python) on the provided code. + + Args: + code: Source code to check. + language: Programming language (default: python). + + Returns: + StyleResult with a 0-100 score and list of style violations. + """ + # Real implementation would call ruff / pycodestyle / eslint etc. + # For demo purposes we return a perfect score when not simulated. + return StyleResult(score=100, violations=[]) + + +SUGGEST_IMPROVEMENTS_EXAMPLES = [ + ExampleCall( + id="basic-function", + input='{"code": "def add(a, b):\\n return a + b"}', + output=( + '{"suggestions": ["Add type annotations", "Add a docstring"],' + ' "refactored_snippet": "def add(a: int, b: int) -> int:\\n ' + "'''Return the sum of a and b.'''\\n return a + b\"}" + ), + ) +] + + +@traced(name="suggest_improvements", span_type="tool") +@mockable(example_calls=SUGGEST_IMPROVEMENTS_EXAMPLES) +async def suggest_improvements(code: str) -> ImprovementResult: + """Analyse code and return actionable improvement suggestions. + + Args: + code: Source code to analyse. + + Returns: + ImprovementResult with suggestions and an optional refactored snippet. + """ + # Real implementation would call an LLM or static analysis tool. + return ImprovementResult(suggestions=[], refactored_snippet=code) + + +# --------------------------------------------------------------------------- +# Agent entrypoint +# --------------------------------------------------------------------------- + + +@traced(name="main") +async def main(input: CodeReviewInput) -> CodeReviewOutput: + """Orchestrate three code-review tools and produce a unified report. + + Each tool call creates its own OpenTelemetry span with span_type="tool", + which enables trajectory-based evaluation and simulation. + """ + syntax = await check_syntax(input.code, input.language) + style = await check_style(input.code, input.language) + improvements = await suggest_improvements(input.code) + + issues = len(syntax.errors) + len(style.violations) + summary = ( + f"Found {issues} issue(s). " + f"Style score: {style.score}/100. " + f"{len(improvements.suggestions)} improvement suggestion(s)."
) + + return CodeReviewOutput( + syntax=syntax, + style=style, + improvements=improvements, + summary=summary, + ) diff --git a/packages/uipath/samples/runtime-simulations-agent/pyproject.toml b/packages/uipath/samples/runtime-simulations-agent/pyproject.toml new file mode 100644 index 000000000..335c55783 --- /dev/null +++ b/packages/uipath/samples/runtime-simulations-agent/pyproject.toml @@ -0,0 +1,14 @@ +[project] +name = "runtime-simulations-agent" +version = "0.0.1" +description = "Code review agent demonstrating runtime simulation" +authors = [{ name = "UiPath", email = "python-sdk@uipath.com" }] +dependencies = [ + "uipath", +] +requires-python = ">=3.11" + +[dependency-groups] +dev = [ + "uipath-dev", +] diff --git a/packages/uipath/samples/runtime-simulations-agent/simulation.json b/packages/uipath/samples/runtime-simulations-agent/simulation.json new file mode 100644 index 000000000..d89bb253f --- /dev/null +++ b/packages/uipath/samples/runtime-simulations-agent/simulation.json @@ -0,0 +1,15 @@ +{ + "enabled": true, + "toolsToSimulate": [ + { + "name": "check_syntax" + }, + { + "name": "check_style" + }, + { + "name": "suggest_improvements" + } + ], + "instructions": "You are simulating a code review system. Given a tool name and its input arguments, produce a realistic JSON response that matches the tool's output schema.\n\n- check_syntax: return {\"valid\": <true|false>, \"errors\": [<error message>, ...]}. If the code looks syntactically correct return valid=true and an empty errors list. Otherwise list the syntax errors.\n- check_style: return {\"score\": <0-100>, \"violations\": [<violation>, ...]}. Evaluate PEP 8 compliance for Python code. Deduct points for missing spaces, missing type annotations, etc.\n- suggest_improvements: return {\"suggestions\": [<suggestion>, ...], \"refactored_snippet\": \"<refactored code>\"}. Suggest concrete improvements such as adding type hints, docstrings, or handling edge cases (e.g. division by zero)."
+} \ No newline at end of file diff --git a/packages/uipath/samples/runtime-simulations-agent/uipath.json b/packages/uipath/samples/runtime-simulations-agent/uipath.json new file mode 100644 index 000000000..9b02c2654 --- /dev/null +++ b/packages/uipath/samples/runtime-simulations-agent/uipath.json @@ -0,0 +1,5 @@ +{ + "functions": { + "main": "main.py:main" + } +} diff --git a/packages/uipath/src/uipath/_cli/cli_run.py b/packages/uipath/src/uipath/_cli/cli_run.py index 5bfd811ed..48f42018b 100644 --- a/packages/uipath/src/uipath/_cli/cli_run.py +++ b/packages/uipath/src/uipath/_cli/cli_run.py @@ -1,12 +1,14 @@ import asyncio import click +from pydantic import ValidationError from uipath._cli._chat._bridge import get_chat_bridge from uipath._cli._debug._bridge import ConsoleDebugBridge from uipath._cli._utils._common import read_resource_overwrites_from_file from uipath._cli._utils._debug import setup_debugging from uipath.core.tracing import UiPathTraceManager +from uipath.eval.mocks import SimulationConfig, UiPathMockRuntime, build_mocking_context from uipath.platform.common import ResourceOverwritesContext, UiPathConfig from uipath.runtime import ( UiPathExecuteOptions, @@ -101,6 +103,12 @@ def get_usage_help(self) -> list[str]: is_flag=True, help="Keep the temporary state file even when not resuming and no job id is provided", ) +@click.option( + "--simulation", + required=False, + default=None, + help="Simulation config as a JSON object (same schema as simulation.json)", +) @track_command("run") def run( entrypoint: str | None, @@ -114,6 +122,7 @@ def run( debug: bool, debug_port: int, keep_state_file: bool, + simulation: str | None, ) -> None: """Execute the project.""" input_file = file or input_file @@ -122,6 +131,14 @@ def run( if not setup_debugging(debug, debug_port): console.error(f"Failed to start debug server on port {debug_port}") + simulation_config: SimulationConfig | None = None + if simulation: + try: + simulation_config = SimulationConfig.model_validate_json(simulation) + except (ValidationError, ValueError) as e: + console.error(f"Invalid --simulation config: {e}") + return + result = Middlewares.next( "run", entrypoint, @@ -193,6 +210,7 @@ async def execute() -> None: lambda: read_resource_overwrites_from_file(ctx.runtime_dir) ): with ctx: + base_runtime: UiPathRuntimeProtocol | None = None runtime: UiPathRuntimeProtocol | None = None chat_runtime: UiPathRuntimeProtocol | None = None factory: UiPathRuntimeFactoryProtocol | None = None @@ -213,10 +231,27 @@ async def execute() -> None: if factory_settings else None ) - runtime = await factory.new_runtime( + base_runtime = await factory.new_runtime( resolved_entrypoint, ctx.conversation_id or ctx.job_id or "default", ) + runtime = base_runtime + + if simulation_config: + schema = await base_runtime.get_schema() + agent_model = None + if schema.metadata and "settings" in schema.metadata: + agent_model = schema.metadata["settings"].get( + "model" + ) + mocking_context = build_mocking_context( + simulation_config, agent_model + ) + if mocking_context: + runtime = UiPathMockRuntime( + delegate=base_runtime, + mocking_context=mocking_context, + ) if ctx.job_id: if UiPathConfig.is_tracing_enabled: @@ -243,8 +278,10 @@ async def execute() -> None: finally: if chat_runtime: await chat_runtime.dispose() - if runtime: + if runtime is not None and runtime is not base_runtime: await runtime.dispose() + if base_runtime is not None: + await base_runtime.dispose() if factory: await factory.dispose() diff --git 
a/packages/uipath/src/uipath/eval/mocks/__init__.py b/packages/uipath/src/uipath/eval/mocks/__init__.py index 95dfb877e..f9e7da177 100644 --- a/packages/uipath/src/uipath/eval/mocks/__init__.py +++ b/packages/uipath/src/uipath/eval/mocks/__init__.py @@ -1,14 +1,21 @@ """Mock interface.""" from ._mock_context import is_tool_simulated -from ._mock_runtime import UiPathMockRuntime -from ._types import ExampleCall, MockingContext +from ._mock_runtime import ( + UiPathMockRuntime, + build_mocking_context, + build_mocking_context_from_dict, +) +from ._types import ExampleCall, MockingContext, SimulationConfig from .mockable import mockable __all__ = [ "ExampleCall", - "UiPathMockRuntime", "MockingContext", - "mockable", + "SimulationConfig", + "UiPathMockRuntime", + "build_mocking_context", + "build_mocking_context_from_dict", "is_tool_simulated", + "mockable", ] diff --git a/packages/uipath/src/uipath/eval/mocks/_mock_runtime.py b/packages/uipath/src/uipath/eval/mocks/_mock_runtime.py index df41dadeb..71036215b 100644 --- a/packages/uipath/src/uipath/eval/mocks/_mock_runtime.py +++ b/packages/uipath/src/uipath/eval/mocks/_mock_runtime.py @@ -2,7 +2,6 @@ from __future__ import annotations -import json import logging import uuid from collections.abc import AsyncGenerator @@ -29,12 +28,70 @@ MockingContext, MockingStrategyType, ModelSettings, - ToolSimulation, + SimulationConfig, ) logger = logging.getLogger(__name__) +def build_mocking_context( + config: SimulationConfig, agent_model: str | None = None +) -> MockingContext | None: + """Build a MockingContext from a validated SimulationConfig. + + Args: + config: Validated simulation config. + agent_model: Optional agent model name to use as fallback. + + Returns: + MockingContext if enabled and tools are specified, None otherwise. + """ + if not config.enabled or not config.tools_to_simulate: + return None + + model = ( + ModelSettings(model=config.model) + if config.model + else ModelSettings(model=agent_model) + if agent_model + else None + ) + + mocking_strategy = LLMMockingStrategy( + type=MockingStrategyType.LLM, + prompt=config.instructions, + tools_to_simulate=config.tools_to_simulate, + model=model, + ) + + logger.debug( + f"Loaded simulation config for {len(config.tools_to_simulate)} tool(s)" + ) + return MockingContext( + strategy=mocking_strategy, + name="debug-simulation", + inputs={}, + ) + + +def build_mocking_context_from_dict( + simulation_data: dict[str, Any], agent_model: str | None = None +) -> MockingContext | None: + """Build a MockingContext from a simulation config dictionary. + + Deprecated: prefer build_mocking_context with a validated SimulationConfig. + + Args: + simulation_data: Parsed simulation config (same schema as simulation.json). + agent_model: Optional agent model name to use as fallback. + + Returns: + MockingContext if valid and enabled, None otherwise. + """ + config = SimulationConfig.model_validate(simulation_data) + return build_mocking_context(config, agent_model) + + def load_simulation_config(agent_model: str | None = None) -> MockingContext | None: """Load simulation.json from current directory and convert to MockingContext. 
@@ -48,48 +105,10 @@ def load_simulation_config(agent_model: str | None = None) -> MockingContext | N return None try: - with open(simulation_path, "r", encoding="utf-8") as f: - simulation_data = json.load(f) - - # Check if simulation is enabled - if not simulation_data.get("enabled", True): - return None - - # Extract tools to simulate - tools_to_simulate = [ - ToolSimulation(name=tool["name"]) - for tool in simulation_data.get("toolsToSimulate", []) - ] - - if not tools_to_simulate: - return None - - # Honor model from simulation config if specified, otherwise use the agent model - simulation_model = simulation_data.get("model") - model = ( - ModelSettings(model=simulation_model) - if simulation_model - else ModelSettings(model=agent_model) - if agent_model - else None - ) - - mocking_strategy = LLMMockingStrategy( - type=MockingStrategyType.LLM, - prompt=simulation_data.get("instructions", ""), - tools_to_simulate=tools_to_simulate, - model=model, - ) - - # Create MockingContext for debugging - mocking_context = MockingContext( - strategy=mocking_strategy, - name="debug-simulation", - inputs={}, + config = SimulationConfig.model_validate_json( + simulation_path.read_text(encoding="utf-8") ) - - logger.info(f"Loaded simulation config for {len(tools_to_simulate)} tool(s)") - return mocking_context + return build_mocking_context(config, agent_model) except Exception as e: logger.warning(f"Failed to load simulation.json: {e}") diff --git a/packages/uipath/src/uipath/eval/mocks/_types.py b/packages/uipath/src/uipath/eval/mocks/_types.py index 827569879..070040b65 100644 --- a/packages/uipath/src/uipath/eval/mocks/_types.py +++ b/packages/uipath/src/uipath/eval/mocks/_types.py @@ -129,6 +129,21 @@ class MockingContext(BaseModel): name: str = Field(default="debug") +class SimulationConfig(BaseModel): + """Top-level schema for simulation.json / --simulation flag.""" + + enabled: bool = True + tools_to_simulate: list[ToolSimulation] = Field( + default_factory=list, alias="toolsToSimulate" + ) + instructions: str = "" + model: str | None = None + + model_config = ConfigDict( + validate_by_name=True, validate_by_alias=True, extra="allow" + ) + + class ExampleCall(BaseModel): """Example call for a resource containing resource I/O.""" diff --git a/packages/uipath/testcases/simulation-testcase/pyproject.toml b/packages/uipath/testcases/simulation-testcase/pyproject.toml new file mode 100644 index 000000000..d37877dbf --- /dev/null +++ b/packages/uipath/testcases/simulation-testcase/pyproject.toml @@ -0,0 +1,12 @@ +[project] +name = "simulation-testcase" +version = "0.0.1" +description = "simulation-testcase" +authors = [{ name = "UiPath", email = "python-sdk@uipath.com" }] +dependencies = [ + "uipath", +] +requires-python = ">=3.11" + +[tool.uv.sources] +uipath = { path = "../../", editable = true } diff --git a/packages/uipath/testcases/simulation-testcase/run.sh b/packages/uipath/testcases/simulation-testcase/run.sh new file mode 100644 index 000000000..0095cc904 --- /dev/null +++ b/packages/uipath/testcases/simulation-testcase/run.sh @@ -0,0 +1,29 @@ +#!/bin/bash +set -e + +TESTCASE_DIR="$(cd "$(dirname "$0")" && pwd)" +SAMPLE_DIR="$(cd "$TESTCASE_DIR/../../samples/runtime-simulations-agent" && pwd)" + +echo "Syncing testcase dependencies (local editable uipath)..." +uv sync --project "$TESTCASE_DIR" + +UIPATH_BIN="$TESTCASE_DIR/.venv/bin/uipath" + +# Run auth and agent from the sample dir so credentials are stored and read +# from the same location. 
+cd "$SAMPLE_DIR" + +echo "Authenticating with UiPath..." +"$UIPATH_BIN" auth \ + --client-id="$CLIENT_ID" \ + --client-secret="$CLIENT_SECRET" \ + --base-url="$BASE_URL" + +echo "Running agent with simulation..." +"$UIPATH_BIN" run main \ + -f input.json \ + --simulation "$(cat simulation.json)" 2>&1 | tee "$TESTCASE_DIR/run.log" + +# Copy the runtime output file back to the testcase dir for assert.py +mkdir -p "$TESTCASE_DIR/__uipath" +cp "$SAMPLE_DIR/__uipath/output.json" "$TESTCASE_DIR/__uipath/output.json" diff --git a/packages/uipath/testcases/simulation-testcase/src/assert.py b/packages/uipath/testcases/simulation-testcase/src/assert.py new file mode 100644 index 000000000..fd7e89697 --- /dev/null +++ b/packages/uipath/testcases/simulation-testcase/src/assert.py @@ -0,0 +1,57 @@ +import json +import os + +# ── 1. Verify agent output exists and succeeded ────────────────────────────── +output_file = "__uipath/output.json" +assert os.path.isfile(output_file), "Agent output file not found" + +with open(output_file, "r", encoding="utf-8") as f: + output_data = json.load(f) + +status = output_data.get("status") +assert status == "successful", f"Agent execution failed with status: {status}" + +output = output_data.get("output", {}) + +assert "syntax" in output, "Missing 'syntax' in output" +assert "style" in output, "Missing 'style' in output" +assert "improvements" in output, "Missing 'improvements' in output" +assert "summary" in output, "Missing 'summary' in output" + +assert isinstance(output["syntax"]["valid"], bool), "'syntax.valid' must be a bool" +assert isinstance(output["syntax"]["errors"], list), "'syntax.errors' must be a list" + +score = output["style"]["score"] +assert isinstance(score, int), "'style.score' must be an int" +assert 0 <= score <= 100, f"'style.score' out of range: {score}" +assert isinstance(output["style"]["violations"], list), ( + "'style.violations' must be a list" +) + +assert isinstance(output["improvements"]["suggestions"], list), ( + "'improvements.suggestions' must be a list" +) +assert isinstance(output["improvements"]["refactored_snippet"], str), ( + "'improvements.refactored_snippet' must be a str" +) + +# ── 2. Verify simulation produced non-default values ───────────────────────── +# Real tool impls always return: score=100, violations=[], suggestions=[]. +# The LLM simulation should detect issues in the input code and return richer output. +simulated_something = ( + score < 100 + or len(output["style"]["violations"]) > 0 + or len(output["improvements"]["suggestions"]) > 0 +) +assert simulated_something, ( + "Output matches hardcoded real-tool defaults — simulation may not have run. 
" + f"style.score={score}, violations={output['style']['violations']}, " + f"suggestions={output['improvements']['suggestions']}" +) + +print( + f"Simulation confirmed: score={score}, " + f"violations={len(output['style']['violations'])}, " + f"suggestions={len(output['improvements']['suggestions'])}" +) +print("All assertions passed.") diff --git a/packages/uipath/tests/cli/eval/mocks/test_mocks.py b/packages/uipath/tests/cli/eval/mocks/test_mocks.py index bdbdd3dc2..c4bc26ee3 100644 --- a/packages/uipath/tests/cli/eval/mocks/test_mocks.py +++ b/packages/uipath/tests/cli/eval/mocks/test_mocks.py @@ -929,3 +929,130 @@ async def foo(*args, **kwargs) -> dict[str, Any]: }, }, } + + +class TestUiPathMockRuntime: + """Tests for UiPathMockRuntime execute/stream/get_schema paths.""" + + def _make_context(self) -> MockingContext: + return MockingContext( + strategy=LLMMockingStrategy( + prompt="test", + tools_to_simulate=[ToolSimulation(name="my_tool")], + ), + name="test", + inputs={}, + ) + + async def test_execute_with_mocking_context_sets_and_clears(self): + from unittest.mock import AsyncMock, patch + + from uipath.eval.mocks._mock_runtime import UiPathMockRuntime + + delegate = MagicMock() + mock_result = MagicMock() + delegate.execute = AsyncMock(return_value=mock_result) + + runtime = UiPathMockRuntime( + delegate=delegate, + mocking_context=self._make_context(), + ) + + with ( + patch("uipath.eval.mocks._mock_runtime.set_execution_context") as mock_set, + patch( + "uipath.eval.mocks._mock_runtime.clear_execution_context" + ) as mock_clear, + ): + result = await runtime.execute({"key": "value"}) + + assert result is mock_result + mock_set.assert_called_once() + mock_clear.assert_called_once() + + async def test_stream_with_mocking_context_sets_and_clears(self): + from unittest.mock import patch + + from uipath.eval.mocks._mock_runtime import UiPathMockRuntime + + sentinel = object() + + async def _gen(*args, **kwargs): + yield sentinel + + delegate = MagicMock() + delegate.stream = _gen + + runtime = UiPathMockRuntime( + delegate=delegate, + mocking_context=self._make_context(), + ) + + with ( + patch("uipath.eval.mocks._mock_runtime.set_execution_context") as mock_set, + patch( + "uipath.eval.mocks._mock_runtime.clear_execution_context" + ) as mock_clear, + ): + events = [e async for e in runtime.stream({})] + + assert events == [sentinel] + mock_set.assert_called_once() + mock_clear.assert_called_once() + + async def test_stream_without_mocking_context_passes_through(self): + from unittest.mock import patch + + from uipath.eval.mocks._mock_runtime import UiPathMockRuntime + + sentinel = object() + + async def _gen(*args, **kwargs): + yield sentinel + + delegate = MagicMock() + delegate.stream = _gen + + runtime = UiPathMockRuntime(delegate=delegate, mocking_context=None) + with patch( + "uipath.eval.mocks._mock_runtime.load_simulation_config", return_value=None + ): + runtime._mocking_context = None + events = [e async for e in runtime.stream({})] + + assert events == [sentinel] + + async def test_get_schema_delegates(self): + from unittest.mock import AsyncMock, patch + + from uipath.eval.mocks._mock_runtime import UiPathMockRuntime + + schema = MagicMock() + delegate = MagicMock() + delegate.get_schema = AsyncMock(return_value=schema) + + runtime = UiPathMockRuntime(delegate=delegate, mocking_context=None) + with patch( + "uipath.eval.mocks._mock_runtime.load_simulation_config", return_value=None + ): + result = await runtime.get_schema() + + assert result is schema + + def 
test_set_execution_context_handles_mocker_creation_failure(self): + from unittest.mock import patch + + from uipath.eval._execution_context import ExecutionSpanCollector + from uipath.eval.mocks._mock_context import mocker_context + from uipath.eval.mocks._mock_runtime import set_execution_context + + context = self._make_context() + with patch( + "uipath.eval.mocks._mock_runtime.MockerFactory.create", + side_effect=RuntimeError("boom"), + ): + set_execution_context(context, ExecutionSpanCollector(), "test-id") + + # mocking_context is set, but mocker_context must be None on failure + assert mocker_context.get() is None + clear_execution_context() diff --git a/packages/uipath/tests/cli/test_run.py b/packages/uipath/tests/cli/test_run.py index 479fc9953..aa182c7c5 100644 --- a/packages/uipath/tests/cli/test_run.py +++ b/packages/uipath/tests/cli/test_run.py @@ -1,4 +1,5 @@ # type: ignore +import json import os from contextlib import asynccontextmanager from unittest.mock import AsyncMock, Mock, patch @@ -448,3 +449,113 @@ def main(input_data: PersonIn) -> PersonOut: assert output_data["email"] == "john@example.com" assert output_data["is_adult"] is True assert output_data["greeting"] == "Hello, John Doe!" + + +_SIMULATION_JSON = { + "enabled": True, + "toolsToSimulate": [{"name": "check_syntax"}, {"name": "check_style"}], + "instructions": "Simulate.", +} + + +class TestRunSimulation: + """Tests for the --simulation flag on the run command.""" + + def _make_factory(self): + factory = Mock() + runtime = Mock() + runtime.stream = Mock(side_effect=_empty_async_gen) + runtime.dispose = AsyncMock() + runtime.get_schema = AsyncMock(return_value=Mock(metadata=None)) + factory.discover_entrypoints.return_value = ["main"] + factory.get_settings = AsyncMock(return_value=None) + factory.dispose = AsyncMock() + factory.new_runtime = AsyncMock(return_value=runtime) + return factory, runtime + + def test_invalid_simulation_json_exits_with_error( + self, runner: CliRunner, temp_dir: str + ): + with runner.isolated_filesystem(temp_dir=temp_dir): + with open("uipath.json", "w") as f: + json.dump({"functions": {"main": "main.py:main"}}, f) + with open("main.py", "w") as f: + f.write("async def main(input): return {}") + + result = runner.invoke( + cli, ["run", "main", "--simulation", "{ not valid json }"] + ) + assert result.exit_code == 1 + assert "Invalid JSON" in result.output + + def test_simulation_wraps_runtime_with_mock_runtime( + self, runner: CliRunner, temp_dir: str + ): + factory, _ = self._make_factory() + + with runner.isolated_filesystem(temp_dir=temp_dir): + with open("uipath.json", "w") as f: + json.dump({"functions": {"main": "main.py:main"}}, f) + with open("main.py", "w") as f: + f.write("async def main(input): return {}") + + with ( + patch( + "uipath._cli.cli_run.Middlewares.next", + return_value=_middleware_continue(), + ), + patch( + "uipath._cli.cli_run.UiPathRuntimeFactoryRegistry.get", + return_value=factory, + ), + patch( + "uipath._cli.cli_run.ResourceOverwritesContext", + side_effect=_mock_resource_overwrites_context, + ), + patch("uipath._cli.cli_run.UiPathMockRuntime") as mock_cls, + ): + mock_cls.return_value = Mock( + stream=Mock(side_effect=_empty_async_gen), + dispose=AsyncMock(), + get_schema=AsyncMock(return_value=Mock(metadata=None)), + ) + runner.invoke( + cli, + ["run", "main", "--simulation", json.dumps(_SIMULATION_JSON)], + ) + + assert mock_cls.called + assert mock_cls.call_args.kwargs["mocking_context"] is not None + + def 
test_simulation_disabled_does_not_wrap_runtime( + self, runner: CliRunner, temp_dir: str + ): + factory, _ = self._make_factory() + disabled = {**_SIMULATION_JSON, "enabled": False} + + with runner.isolated_filesystem(temp_dir=temp_dir): + with open("uipath.json", "w") as f: + json.dump({"functions": {"main": "main.py:main"}}, f) + with open("main.py", "w") as f: + f.write("async def main(input): return {}") + + with ( + patch( + "uipath._cli.cli_run.Middlewares.next", + return_value=_middleware_continue(), + ), + patch( + "uipath._cli.cli_run.UiPathRuntimeFactoryRegistry.get", + return_value=factory, + ), + patch( + "uipath._cli.cli_run.ResourceOverwritesContext", + side_effect=_mock_resource_overwrites_context, + ), + patch("uipath._cli.cli_run.UiPathMockRuntime") as mock_cls, + ): + runner.invoke( + cli, ["run", "main", "--simulation", json.dumps(disabled)] + ) + + assert not mock_cls.called diff --git a/packages/uipath/uv.lock b/packages/uipath/uv.lock index 3d4c916d3..c51486b96 100644 --- a/packages/uipath/uv.lock +++ b/packages/uipath/uv.lock @@ -2543,7 +2543,7 @@ wheels = [ [[package]] name = "uipath" -version = "2.10.62" +version = "2.10.63" source = { editable = "." } dependencies = [ { name = "applicationinsights" },