diff --git a/runner.py b/runner.py index b79e7d8..692df85 100644 --- a/runner.py +++ b/runner.py @@ -6278,6 +6278,7 @@ def __init__(self, script_path: str, script_args: Optional[List[str]] = None, self.max_output_lines = None self.hooks = ExecutionHook() self.monitor_interval = 0.1 + self.config_file = config_file # UPDATED: Phase 2 retry config (replaces old retry_count and retry_delay) self.retry_config = RetryConfig() # Default configuration @@ -6291,8 +6292,10 @@ def __init__(self, script_path: str, script_args: Optional[List[str]] = None, # NEW: Phase 2 features self.enable_history = enable_history self.history_manager = None + self.history_db_path = None if enable_history: db_path = history_db or 'script_runner_history.db' + self.history_db_path = db_path self.history_manager = HistoryManager(db_path=db_path) # NEW: Trend Analysis (Phase 2) @@ -6497,6 +6500,30 @@ def validate_script(self) -> bool: self.logger.warning(f"Script does not have .py extension: {self.script_path}") return True + def get_execution_plan(self) -> Dict[str, Any]: + """Return a structured view of how the script will be executed. + + This helper is used by the CLI ``--dry-run`` flag to show what the + runner would do without actually launching the subprocess. It surfaces + key configuration such as the script path, arguments, timeouts, logging + level, configuration file, and history database state. + + Returns: + Dict[str, Any]: Execution summary including paths and toggles. 
+ """ + return { + 'script_path': os.path.abspath(self.script_path), + 'script_args': list(self.script_args), + 'timeout': self.timeout, + 'log_level': logging.getLevelName(self.logger.level), + 'config_file': os.path.abspath(self.config_file) if self.config_file else None, + 'history_enabled': self.enable_history, + 'history_db': os.path.abspath(self.history_db_path) if self.history_db_path else None, + 'monitor_interval': self.monitor_interval, + 'retry_strategy': self.retry_config.strategy, + 'max_attempts': self.retry_config.max_attempts, + } + def run_script(self, retry_on_failure: bool = False) -> Dict: """Execute script with advanced retry and monitoring capabilities. @@ -7148,8 +7175,10 @@ def main(): parser.add_argument('script', nargs='?', help='Python script to execute') parser.add_argument('script_args', nargs='*', help='Arguments to pass to the script') parser.add_argument('--timeout', type=int, default=None, help='Execution timeout in seconds') - parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], + parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default='INFO', help='Logging level') + parser.add_argument('--dry-run', action='store_true', + help='Validate the script and show execution plan without running it') parser.add_argument('--config', help='Configuration file (YAML)') parser.add_argument('--monitor-interval', type=float, default=0.1, help='Process monitor sampling interval (seconds)') @@ -8479,6 +8508,23 @@ def main(): enable_history=not args.disable_history ) + if args.dry_run: + # Apply CLI overrides that affect the reported plan before building it; + # otherwise the plan would show the constructor default monitor_interval. + runner.monitor_interval = args.monitor_interval + try: + if not runner.validate_script(): + logging.error("Dry-run validation failed") + return 1 + except Exception as exc: + logging.error(f"Dry-run validation failed: {exc}") + return 1 + + plan = runner.get_execution_plan() + print("\nDRY-RUN: Execution plan (no script executed)") + for key, value in plan.items(): + print(f" {key}: {value}") + return 0 + runner.monitor_interval = args.monitor_interval runner.suppress_warnings = args.suppress_warnings diff --git
a/tests/test_integration.py b/tests/test_integration.py index a5bf15a..b42638f 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -15,6 +15,8 @@ import tempfile import json import time +import subprocess +import sys from pathlib import Path from unittest.mock import Mock, patch @@ -66,16 +68,33 @@ def test_history_database_creation(self, tmp_path): """Test that history database is properly created""" script_file = tmp_path / "test_db.py" script_file.write_text("print('test'); exit(0)") - + db_file = tmp_path / "test.db" runner = ScriptRunner(str(script_file), enable_history=True) - + result = runner.run_script() - + # Check if metrics are collected assert 'metrics' in result assert len(result['metrics']) > 0 + def test_cli_dry_run_shows_execution_plan(self, tmp_path): + """Ensure CLI dry-run validates script and prints plan without running it.""" + script_file = tmp_path / "dry_run_target.py" + script_file.write_text("print('should not run during dry-run')") + + result = subprocess.run( + [sys.executable, "-m", "runner", str(script_file), "--dry-run", "--timeout", "3"], + capture_output=True, + text=True, + check=False, + ) + + assert result.returncode == 0 + assert "DRY-RUN: Execution plan" in result.stdout + assert "dry_run_target.py" in result.stdout + assert "timeout: 3" in result.stdout + @pytest.mark.integration class TestAlertIntegration: diff --git a/tests/test_runner_core.py b/tests/test_runner_core.py index 53eb81f..e25d99b 100644 --- a/tests/test_runner_core.py +++ b/tests/test_runner_core.py @@ -60,11 +60,35 @@ def test_runner_with_history(self, tmp_path): script_file = tmp_path / "test.py" script_file.write_text("print('hello')") db_file = tmp_path / "history.db" - + runner = ScriptRunner(str(script_file), enable_history=True, history_db=str(db_file)) - + assert runner.enable_history is True + def test_execution_plan_summary(self, tmp_path): + """Ensure execution plan surfaces key configuration without running script.""" + script_file =
tmp_path / "plan.py" + script_file.write_text("print('dry run')") + db_file = tmp_path / "history.db" + + runner = ScriptRunner( + str(script_file), + script_args=["--flag", "value"], + timeout=5, + history_db=str(db_file), + enable_history=True, + log_level="DEBUG", + ) + + plan = runner.get_execution_plan() + + assert plan["script_path"].endswith("plan.py") + assert plan["script_args"] == ["--flag", "value"] + assert plan["timeout"] == 5 + assert plan["history_enabled"] is True + assert plan["history_db"].endswith("history.db") + assert plan["log_level"] == "DEBUG" + @pytest.mark.unit class TestScriptExecution: