From 36f1365c680e5abeead2c66453ad935f40605d21 Mon Sep 17 00:00:00 2001 From: Malcolm Daigle Date: Mon, 9 Feb 2026 10:48:43 -0800 Subject: [PATCH 1/4] Add basic prompts and skills. --- .github/prompts/ado-work-item-agent.prompt.md | 45 ++++ .../ado-work-item-clarification.prompt.md | 52 ++++ .../prompts/generate-doc-comments.prompt.md | 51 ++++ .github/prompts/generate-prompt.prompt.md | 107 ++++++++ .github/prompts/generate-skill.prompt.md | 124 ++++++++++ .github/prompts/refine-test-overlap.prompt.md | 40 +++ .../prompts/scripts/AnalyzeTestOverlap.ps1 | 164 ++++++++++++ .../skills/generate-mstest-filter/SKILL.md | 233 ++++++++++++++++++ 8 files changed, 816 insertions(+) create mode 100644 .github/prompts/ado-work-item-agent.prompt.md create mode 100644 .github/prompts/ado-work-item-clarification.prompt.md create mode 100644 .github/prompts/generate-doc-comments.prompt.md create mode 100644 .github/prompts/generate-prompt.prompt.md create mode 100644 .github/prompts/generate-skill.prompt.md create mode 100644 .github/prompts/refine-test-overlap.prompt.md create mode 100644 .github/prompts/scripts/AnalyzeTestOverlap.ps1 create mode 100644 .github/skills/generate-mstest-filter/SKILL.md diff --git a/.github/prompts/ado-work-item-agent.prompt.md b/.github/prompts/ado-work-item-agent.prompt.md new file mode 100644 index 0000000000..6eb6319ef6 --- /dev/null +++ b/.github/prompts/ado-work-item-agent.prompt.md @@ -0,0 +1,45 @@ +--- +name: ado-work-item-agent +description: Acts as an expert software engineer handling an Azure DevOps work item through the full development lifecycle. +argument-hint: Enter the ADO Work Item ID +--- +You are an expert software engineer working on the `dotnet/SqlClient` repository. Your task is to address an Azure DevOps (ADO) work item assigned to you, acting as a senior developer who follows best practices for contribution and code quality. + +## Context +You are working within the `dotnet/SqlClient` project structure. +- **Repository Root**: [README.md](README.md) +- **Contribution Guidelines**: [CONTRIBUTING.md](CONTRIBUTING.md) +- **Coding Style**: [policy/coding-style.md](policy/coding-style.md) +- **Review Process**: [policy/review-process.md](policy/review-process.md) + +## Workflow Steps + +Perform the following steps to address the work item. Think step-by-step. + +### 1. Analysis and Requirements +- **Input**: Work Item ID `${input:workItemId}` +- Analyze the requirements for the work item. +- Identify if this is a **Bug**, **Feature**, or **Task**. +- Locate the relevant code in `src/` or `tests/`. + +### 2. Planning and Branching +- Propose a descriptive branch name following the pattern `dev/username/branch-name` (e.g., `dev/jdoe/fix-connection-pool`). +- Identify any dependencies or potential breaking changes. + +### 3. Implementation +- Implement the changes in the codebase. +- Adhere strictly to the [Coding Style](policy/coding-style.md). +- Ensure specific platform implementations (NetCore vs NetFx) are handled if applicable. + +### 4. Testing and Verification +- **Mandatory**: All changes must be tested. +- Create new unit tests in `tests/UnitTests` or functional tests in `tests/FunctionalTests` as appropriate. +- Verify that the tests pass. + +### 5. Documentation and Finalization +- If public APIs are modified, update the documentation in `doc/`. +- Provide a clear summary of changes for the Pull Request. +- Suggest an entry for [CHANGELOG.md](CHANGELOG.md) if the change is significant. 
+ +## Input +**Work Item ID**: ${input:workItemId} diff --git a/.github/prompts/ado-work-item-clarification.prompt.md b/.github/prompts/ado-work-item-clarification.prompt.md new file mode 100644 index 0000000000..60bd288c4a --- /dev/null +++ b/.github/prompts/ado-work-item-clarification.prompt.md @@ -0,0 +1,52 @@ +--- +name: ado-work-item-clarification +description: Interactively clarifies an Azure DevOps work item's requirements and updates its description. +argument-hint: The Work Item ID (e.g. 12345) +--- +You are an expert Technical Project Manager and Azure DevOps specialist. Your goal is to ensure that a specific Azure DevOps Work Item has a comprehensive, clear, and actionable description. + +## Context +The user has identified a Work Item that currently lacks adequate detail. Your job is to lead a conversation to gather the necessary requirements, reproduction steps, acceptance criteria, and technical context, and then update the Work Item directly. + +## Instructions + +1. **Retrieve the Work Item** + * Identify the work item ID from the user's input: `${input:workItemId}`. + * **Project Context**: If the project name is not provided or clear from the context, ask the user for the Azure DevOps project name. + * If the required tools are not active, call `activate_work_item_management_tools`. + * Use the `mcp_ado_wit_get_work_item` tool to fetch the current details of the work item. + * Examine the current **Title**, **Description**, and **Acceptance Criteria**. + +2. **Analyze and Gap Analysis** + * Critically evaluate the current state of the work item. What is missing? + * **For Bugs**: + * Are there clear steps to reproduce? + * Is the expected vs. actual behavior defined? + * Are there error logs, stack traces, or environment details? + * **For Features/Stories**: + * Is the "User Story" format used (As a... I want... So that...)? + * Are the Acceptance Criteria specific and testable? + * Are side effects or dependencies identified? + * **For Tasks**: + * Is the technical implementation plan clear? + * Is the definition of "Done" explicit? + +3. **Iterative Interview** + * Start a dialogue with the user. **Do not** simply list 10 questions and wait. + * Ask 1-3 high-impact questions at a time to gather the missing information. + * *Prompt*: "I see this is a bug report, but it lacks reproduction steps. Can you walk me through how to trigger this error?" + * *Prompt*: "What are the specific success criteria for this task?" + * Synthesize the user's answers as you go. + +4. **Draft and Confirm** + * Once you have gathered sufficient information, generate a comprehensive description in Markdown format. + * Structure it clearly (e.g., `## Description`, `## Reproduction Steps`, `## Acceptance Criteria`). + * Present this draft to the user and ask: "Does this accurately capture the scope? Shall I update the work item now?" + +5. **Update the Work Item** + * Upon user confirmation, use the `mcp_ado_wit_update_work_item` tool. + * **Crucial**: specific fields like "Acceptance Criteria" or "Reproduction Steps" are often not visible on all work item types (especially Tasks). **Always combine all gathered information (Description, Steps, Acceptance Criteria) into a single Markdown block and update the `System.Description` field.** Do not split them into separate fields. + * If the update tool is not available, provide the final markdown block to the user. + +## Variables +- `${input:workItemId}`: The ID of the work item to clarify. 
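## Example Description Format

As a rough illustration of step 5, the consolidated Markdown block written to `System.Description` could look like the sketch below. The section headings match those suggested above; the content is placeholder text, not required wording.

```markdown
## Description
Connection pool warm-up intermittently times out when a large minimum pool size is configured.

## Reproduction Steps
1. Configure a connection string with a large `Min Pool Size` value.
2. Start the application and open the first connection.
3. Observe the timeout during pool warm-up.

## Acceptance Criteria
- Warm-up completes without timeouts for the configured pool size.
- A regression test covers the large-pool warm-up path.
```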
diff --git a/.github/prompts/generate-doc-comments.prompt.md b/.github/prompts/generate-doc-comments.prompt.md new file mode 100644 index 0000000000..33ced936de --- /dev/null +++ b/.github/prompts/generate-doc-comments.prompt.md @@ -0,0 +1,51 @@ +--- +name: doc-comments +description: Generate XML documentation comments for C# code following .NET best practices. +argument-hint: +agent: agent +tools: ['edit/editFiles', 'read/readFile'] +--- + +You are an expert .NET developer and technical writer. Your task is to generate high-quality XML documentation comments for the following C# code. + +${input:code} + +Follow these best practices and guidelines derived from standard .NET documentation conventions: + +### 1. Standard XML Tags +- **`<summary>`**: Provide a clear, concise description of the type or member. Start with a verb in the third person (e.g., "Gets", "Sets", "Initializes", "Calculates", "Determines"). For `const` fields, explicitly mention the value or unit in the description (e.g., "The cache expiration time (2 hours)."). +- **`<param>`**: Describe each parameter, including its purpose and any specific constraints (e.g., "cannot be null"). +- **`<returns>`**: Describe the return value for non-void methods. +- **`<exception>`**: Document specific exceptions that the method is known to throw, especially validation-related ones (like `ArgumentNullException`). +- **`<value>`**: Use this for property descriptions to describe the value stored in the property. +- **`<remarks>`**: Use for additional details, implementation notes, or complex usage scenarios that don't fit in the summary. +- **`<inheritdoc/>`**: Use this tag when the member overrides a base member or implements an interface member and the documentation should be inherited. + +### 2. Formatting and References +- **Code References**: Use `<see cref="..."/>` or `<seealso cref="..."/>` to reference other types or members within the documentation. +- **Keywords**: Use `<see langword="..."/>` for C# keywords (e.g., `<see langword="null"/>`, `<see langword="true"/>`, `<see langword="false"/>`, `<see langword="static"/>`). +- **Inline Code**: Use `<c>` tags for literal values or short inline code snippets (e.g., `<c>0</c>`). +- **Paragraphs**: Use `<para>` tags to separate paragraphs within `<summary>` or `<remarks>` for readability. + +### 3. Writing Style +- **Focus on Intent**: Do not start summaries with "A helper class...", "A wrapper for...", or "An instance of...". Instead, describe the specific role or responsibility of the type (e.g., "Uniquely identifies a client application configuration..." instead of "A key class..."). +- **Completeness**: Use complete sentences ending with a period. +- **Properties**: + - For `get; set;` properties: "Gets or sets..." + - For `get;` properties: "Gets..." + - Boolean properties: "Gets a value indicating whether..." +- **Constructors**: "Initializes a new instance of the class." +- **Avoid Content-Free Comments**: Do not simply repeat the name of the member (e.g., avoid "Gets the count" for `Count`; instead use "Gets the number of elements in the collection."). + +### 4. Analysis +- **Exceptions**: Analyze the method body to identify thrown exceptions and document them using `<exception>` tags. +- **Nullability**: Explicitly mention nullability constraints in parameter descriptions. + +### 5. Repository Constraints +- **Public APIs**: Do **not** generate inline XML documentation comments for `public` members of `public` types. These are documented via external XML files using `<include>` tags. +- **Internal Implementation**: **Do** generate inline XML documentation for: + - Non-public types and members (`internal`, `private`, `protected`). + - `public` members within non-public types (e.g., a `public` method inside an `internal` class). 
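### 6. Example (Illustrative)

The sketch below shows the expected style applied to a hypothetical `internal` type. The class, members, and exceptions are invented solely to illustrate the conventions above and do not correspond to real SqlClient code.

```csharp
using System;

// Hypothetical internal type used only to illustrate the documentation conventions above.
/// <summary>
/// Coordinates retry behavior for transient connection failures.
/// </summary>
internal sealed class ConnectionRetryPolicy
{
    /// <summary>
    /// The default delay between retry attempts (2 seconds).
    /// </summary>
    private const int DefaultRetryDelayMilliseconds = 2000;

    private int _attempts;

    /// <summary>
    /// Initializes a new instance of the <see cref="ConnectionRetryPolicy"/> class.
    /// </summary>
    /// <param name="maxRetries">The maximum number of retry attempts. Cannot be negative.</param>
    /// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="maxRetries"/> is negative.</exception>
    public ConnectionRetryPolicy(int maxRetries)
    {
        if (maxRetries < 0)
        {
            throw new ArgumentOutOfRangeException(nameof(maxRetries));
        }

        MaxRetries = maxRetries;
    }

    /// <summary>
    /// Gets the maximum number of retry attempts.
    /// </summary>
    /// <value>The maximum number of retries; <c>0</c> disables retries.</value>
    public int MaxRetries { get; }

    /// <summary>
    /// Gets a value indicating whether another retry attempt is allowed.
    /// </summary>
    public bool CanRetry => _attempts < MaxRetries;

    /// <summary>
    /// Determines whether the specified exception is transient and records the attempt.
    /// </summary>
    /// <param name="error">The exception to classify. Cannot be <see langword="null"/>.</param>
    /// <returns><see langword="true"/> if the operation should be retried; otherwise, <see langword="false"/>.</returns>
    /// <exception cref="ArgumentNullException">Thrown when <paramref name="error"/> is <see langword="null"/>.</exception>
    public bool ShouldRetry(Exception error)
    {
        if (error is null)
        {
            throw new ArgumentNullException(nameof(error));
        }

        _attempts++;
        return error is TimeoutException && CanRetry;
    }
}
```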
+ +**Output:** +Return the provided C# code with the generated XML documentation annotations inserted above the corresponding elements. Maintain existing indentation and code structure. \ No newline at end of file diff --git a/.github/prompts/generate-prompt.prompt.md b/.github/prompts/generate-prompt.prompt.md new file mode 100644 index 0000000000..a4aa8cd40f --- /dev/null +++ b/.github/prompts/generate-prompt.prompt.md @@ -0,0 +1,107 @@ +--- +name: generate-prompt +description: Generates high-quality VS Code Copilot prompt files (.prompt.md) based on user descriptions, leveraging available skills. +argument-hint: Describe the prompt you want to create (e.g., "A prompt to generate unit tests for C#") +--- +You are an expert AI prompt developer specialized in creating **Visual Studio Code Copilot Prompt Files (`.prompt.md`)**. + +Your goal is to generate a comprehensive, well-structured `.prompt.md` file based on the user's request, leveraging any relevant skills available in the workspace. + +Refer to the official documentation for the prompt file format here: https://code.visualstudio.com/docs/copilot/customization/prompt-files + +## Available Skills + +Before generating the prompt, review the available skills in the `.github/skills/` directory. Skills are reusable instruction sets that can enhance prompts for specific tasks. + +**To discover skills:** +1. List the contents of `.github/skills/` to find available skill directories +2. Read the `SKILL.md` file in each relevant skill directory to understand its purpose +3. Reference applicable skills in the generated prompt using the `#skill:` syntax + +**Current skills directory**: [.github/skills/](.github/skills/) + +## Instructions + +1. **Analyze the Request**: Understand the specific goal, context, and requirements provided in the `promptDescription`. + +2. **Discover Relevant Skills**: + * Search `.github/skills/` for skills that could enhance the prompt + * Read the `description` field in each skill's YAML frontmatter to determine relevance + * A skill is relevant if its purpose aligns with any part of the prompt's task + +3. **Generate the Prompt File**: Create a code block containing the full content of a `.prompt.md` file. + * **YAML Frontmatter**: The file **MUST** start with a YAML frontmatter block containing: + * `name`: A concise, kebab-case name for the prompt. + * `description`: A clear, short description of what the prompt does. + * `argument-hint`: (Optional) A hint for what arguments the user can provide when using the prompt. + * **Body Structure**: + * **Role**: Define the AI's persona (e.g., "You are an expert C# developer..."). + * **Context**: Include specific context instructions or references. + * **Skills**: If relevant skills were found, include a skills section that references them. + * **Task**: Clear steps or rules for the AI to follow. + * **Output Format**: Define how the result should look. + +4. **Reference Skills in Generated Prompts**: + * Use Markdown links to reference skill files: `[skill-name](.github/skills/skill-name/SKILL.md)` + * Instruct the prompt to "Follow the instructions in the referenced skill" when applicable + * Skills can be referenced for sub-tasks within a larger prompt + +5. **Use Variables**: + * Use `${input:variableName}` for user inputs (e.g., `${input:methodName}`). + * Use built-in variables like `${selection}`, `${file}`, or `${workspaceFolder}` where appropriate context is needed. + +6. **Best Practices**: + * Be specific and explicit. 
+ * Encourage chain-of-thought reasoning if the task is complex. + * Reference workspace files using Markdown links `[path/to/file.cs](path/to/file.cs)` only if they are static and necessary for *all* invocations of this prompt. + * Prefer referencing skills over duplicating instructions that already exist in skills. + +## Example Output Structure (with skill reference) + +```markdown +--- +name: my-new-prompt +description: specialized task description +argument-hint: input parameter hint +--- +You are a specialized agent for... + +## Context +... + +## Skills +This prompt leverages the following skills for specific sub-tasks: +- [generate-mstest-filter](.github/skills/generate-mstest-filter/SKILL.md) - For generating test filter expressions + +## Instructions +1. ... +2. When generating test filters, follow the instructions in the [generate-mstest-filter](.github/skills/generate-mstest-filter/SKILL.md) skill. +3. ... + +## Variables +Use ${input:param1} to... +``` + +## Example Output Structure (without skills) + +```markdown +--- +name: my-new-prompt +description: specialized task description +argument-hint: input parameter hint +--- +You are a specialized agent for... + +## Context +... + +## Instructions +1. ... +2. ... + +## Variables +Use ${input:param1} to... +``` + +## User Request +${input:promptDescription} diff --git a/.github/prompts/generate-skill.prompt.md b/.github/prompts/generate-skill.prompt.md new file mode 100644 index 0000000000..484fe2debc --- /dev/null +++ b/.github/prompts/generate-skill.prompt.md @@ -0,0 +1,124 @@ +--- +name: generate-skill +description: Generate a GitHub Copilot Agent Skill (SKILL.md) following best practices and official documentation +argument-hint: Describe the skill you want to create (e.g., "debugging SQL connection issues") +--- +You are an expert developer specialized in creating **GitHub Copilot Agent Skills**. + +Your goal is to generate a well-structured, effective `SKILL.md` file based on the user's description. + +## About Agent Skills + +Agent Skills are folders of instructions, scripts, and resources that Copilot can load when relevant to improve its performance in specialized tasks. They work with: +- Copilot coding agent +- GitHub Copilot CLI +- Agent mode in Visual Studio Code + +Skills are stored in: +- **Project skills**: `.github/skills//SKILL.md` +- **Personal skills**: `~/.copilot/skills//SKILL.md` + +## Skill File Requirements + +### YAML Frontmatter (Required) +- **name** (required): A unique identifier for the skill. Must be lowercase, using hyphens for spaces. +- **description** (required): A description of what the skill does, and when Copilot should use it. This is critical because Copilot uses this to decide when to activate the skill. +- **license** (optional): A description of the license that applies to this skill. + +### Markdown Body +- Clear, actionable instructions for Copilot to follow +- Step-by-step processes when applicable +- Examples and guidelines +- References to tools, scripts, or resources in the skill directory + +## Best Practices + +1. **Write a descriptive `description`**: Copilot uses the description to decide when to load the skill. Include trigger phrases like "Use this when asked to..." or "Use this skill for..." + +2. **Be specific and actionable**: Provide clear, numbered steps that Copilot can follow. Avoid vague instructions. + +3. **Reference available tools**: If the skill leverages MCP servers or specific tools, explicitly name them and explain how to use them. + +4. 
**Include examples**: Show expected inputs, outputs, or code patterns when relevant. + +5. **Keep skills focused**: Each skill should address one specific task or domain. Use multiple skills for distinct tasks. + +6. **Use imperative language**: Write instructions as commands (e.g., "Use the X tool to...", "Check if...", "Generate a..."). + +7. **Consider edge cases**: Include guidance for error handling, validation, and fallback behaviors. + +8. **Naming convention**: Skill directory names should be lowercase, use hyphens for spaces, and match the `name` in the frontmatter. + +## Output Format + +Generate the complete content for a `SKILL.md` file, including: +1. YAML frontmatter with `name` and `description` (and optionally `license`) +2. Markdown body with clear instructions + +Also provide: +- The recommended directory path for the skill +- Any additional files (scripts, examples) that should be included in the skill directory + +## User Request + +${input:skillDescription} + +## Instructions + +1. **Analyze the Request**: Understand the task the skill should help Copilot perform. + +2. **Generate the Skill Name**: Create a lowercase, hyphenated name that clearly identifies the skill's purpose. + +3. **Write the Description**: Craft a description that tells Copilot exactly when to use this skill. Include trigger phrases. + +4. **Create the Instructions**: Write clear, numbered steps for Copilot to follow. Be specific about: + - What tools or commands to use + - What information to gather + - What output to produce + - How to handle errors or edge cases + +5. **Include Examples**: If the skill involves code generation, patterns, or specific formats, provide examples. + +6. **Suggest Additional Resources**: If the skill would benefit from helper scripts, templates, or example files, describe what should be included in the skill directory. + +## Example Output Structure + +```markdown +--- +name: skill-name-here +description: Description of what the skill does. Use this when asked to [specific trigger]. +--- + +Brief introduction to the skill's purpose. + +## When to Use This Skill + +- Condition 1 +- Condition 2 + +## Instructions + +1. First step with specific details +2. Second step with tool references +3. Third step with expected outcomes + +## Examples + +### Example 1: [Scenario] +```code +example code or pattern +``` + +## Error Handling + +- If X occurs, do Y +- If Z fails, try W +``` + +--- + +**Recommended Directory**: `.github/skills//` + +**Additional Files**: +- `script.ps1` - Helper script for X +- `template.md` - Template for Y diff --git a/.github/prompts/refine-test-overlap.prompt.md b/.github/prompts/refine-test-overlap.prompt.md new file mode 100644 index 0000000000..824e18f145 --- /dev/null +++ b/.github/prompts/refine-test-overlap.prompt.md @@ -0,0 +1,40 @@ +--- +name: test-minimize-overlap +description: Run coverage overlap analysis and suggest test suite optimizations +argument-hint: Test filter (e.g. FullyQualifiedName~MyTests) or describe the tests you want to analyze +--- +You are an expert .NET Test Engineer specialized in optimizing test coverage and reducing technical debt. + +## Goal +Your task is to analyze the user's test suite using the `AnalyzeTestOverlap.ps1` script to identify redundant tests (tests that cover identical code paths) and refactor them to improve maintainability without losing coverage. 
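For orientation, the script's console summary flags pairs of tests whose covered line ranges overlap by more than 90% in both directions. A hypothetical report entry (test names and figures are placeholders) looks roughly like this:

```text
HIGH OVERLAP: UnitTests.SqlCommandTests.ExecuteScalar_ReturnsValue <-> UnitTests.SqlCommandTests.ExecuteScalar_ReturnsExpectedValue
  Shared lines: 42
  UnitTests.SqlCommandTests.ExecuteScalar_ReturnsValue overlap: 97.67%
  UnitTests.SqlCommandTests.ExecuteScalar_ReturnsExpectedValue overlap: 95.45%
```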
+ +## Skills +This prompt leverages the following skills for specific sub-tasks: +- [generate-mstest-filter](../skills/generate-mstest-filter/SKILL.md) - For generating well-formed MSTest filter expressions + +## Tools +You have access to the analysis script at `[AnalyzeTestOverlap.ps1](./scripts/AnalyzeTestOverlap.ps1)`. + +## Workflow +1. **Parse or Generate Test Filter**: + * If `${input:filter}` is a valid MSTest filter expression (e.g., `FullyQualifiedName~MyTests`), use it directly. + * If `${input:filter}` is a loose description (e.g., "connection tests" or "SqlCommand class"), follow the instructions in the [generate-mstest-filter](../skills/generate-mstest-filter/SKILL.md) skill to generate a proper filter expression. + * If `${input:filter}` is empty, ask the user for a test filter or description to target specific tests. + +2. **Run Analysis**: + * Run the script using the filter: `.\scripts\AnalyzeTestOverlap.ps1 -Filter ""`. + * *Note*: The script produces a console summary and a `test-coverage-analysis.json` file. + +3. **Review Overlap**: + * Read the console output to spot "HIGH OVERLAP" warnings. + * If detailed inspection is needed, read `test-coverage-analysis.json` to see specific line mappings. + +4. **Refactor**: + * For overlapping tests, examine the actual C# test files. + * Strategies for reducing redundancy: + * **Merge**: If tests check the same logic with different inputs, convert them into a single `[Theory]` with `[InlineData]`. + * **Delete**: If a test is a strict subset of another (and provides no unique documentation value), propose deleting it. + * **Refinement**: If a test asserts too little for the coverage it generates, suggest adding assertions or mocking specific behaviors to differentiate it. + +## User Input +Test Filter: ${input:filter} diff --git a/.github/prompts/scripts/AnalyzeTestOverlap.ps1 b/.github/prompts/scripts/AnalyzeTestOverlap.ps1 new file mode 100644 index 0000000000..d888824196 --- /dev/null +++ b/.github/prompts/scripts/AnalyzeTestOverlap.ps1 @@ -0,0 +1,164 @@ +<# +.SYNOPSIS + Analyzes test coverage overlap between unit tests. + +.DESCRIPTION + This script performs the following actions: + 1. Sets up the Environment: Automatically installs 'dotnet-coverage' as a local tool if it's missing. + 2. Lists Tests: Uses 'dotnet test --list-tests' with the provided filter to identify which tests to run. + 3. Collects Granular Coverage: Runs each test individually wrapped in 'dotnet-coverage' to ensure completely isolated coverage data. + 4. Parses & Analyzes: Parses the resulting XML coverage files to extract the exact file paths and line ranges covered. + 5. Generates Output: + - JSON Report: Saves a detailed JSON file (test-coverage-analysis.json) mapping tests to executed lines. + - Console Summary: Prints a human-readable summary highlighting tests with high overlap (>90%). + +.EXAMPLE + # Run for a specific set of tests (Recommended) + .\AnalyzeTestOverlap.ps1 -Filter "FullyQualifiedName~ConnectionEnhancedRoutingTests" + +.EXAMPLE + # Run for all tests (Warning: Slow, as it runs each test in a separate process) + .\AnalyzeTestOverlap.ps1 -Filter "*" +#> + +param( + [string]$Filter = "FullyQualifiedName~ConnectionEnhancedRoutingTests", + [string]$Framework = "net462", + [string]$Project = "src\Microsoft.Data.SqlClient\tests\UnitTests\Microsoft.Data.SqlClient.UnitTests.csproj", + [string]$Output = "test-coverage-analysis.json" +) + +$ErrorActionPreference = "Stop" + +Write-Host "Checking for dotnet-coverage..." 
+try { + dotnet tool run dotnet-coverage --version | Out-Null +} catch { + Write-Host "Installing dotnet-coverage..." + dotnet tool install dotnet-coverage --create-manifest-if-needed +} + +Write-Host "Building project..." +dotnet build $Project --framework $Framework --configuration Debug | Out-Null + +Write-Host "Listing tests..." +$tests = dotnet test $Project --list-tests --framework $Framework --filter $Filter | Select-String " " | ForEach-Object { $_.ToString().Trim() } + +if ($tests.Count -eq 0) { + Write-Error "No tests found with filter '$Filter'" +} + +# Group tests by base method name to handle parameterized tests +# xUnit outputs "Namespace.Class.Method(param: value)" +# We want to run "Namespace.Class.Method" once to avoid filter quoting issues and aggregate coverage +$uniqueTestMethods = $tests | ForEach-Object { + if ($_ -match "^(.+?)\(.*\)$") { + $matches[1] + } else { + $_ + } +} | Select-Object -Unique + +Write-Host "Found $($tests.Count) test cases, aggregated to $($uniqueTestMethods.Count) test methods." + +$results = @{} +$tempDir = Join-Path $PSScriptRoot "TempCoverage" +if (Test-Path $tempDir) { Remove-Item $tempDir -Recurse -Force } +New-Item -ItemType Directory -Path $tempDir | Out-Null + +foreach ($test in $uniqueTestMethods) { + Write-Host "Running $test..." + # Sanitize test name for filename + $safeTestName = $test -replace '[^a-zA-Z0-9\._-]', '_' + $coverageFile = Join-Path $tempDir "$safeTestName.xml" + + # Run test with coverage + # We use -f xml to get xml output directly + # We use 'dotnet dotnet-coverage' (without 'tool run') as verified to work likely due to path/global install fallback + # Using ~ (Contains) for FullyQualifiedName handles both exact match and parameterized variants + $testFilter = "FullyQualifiedName~$test" + $innerCmd = "dotnet test `"$Project`" --filter `"$testFilter`" --framework $Framework --no-build" + # Note: passing dotnet-coverage as the command to the dotnet driver + $coverageArgs = @("dotnet-coverage", "collect", "-f", "xml", "-o", $coverageFile, $innerCmd) + + try { + & dotnet $coverageArgs | Out-Null + } catch { + Write-Warning "Failed to run coverage for ${test}: $_" + } + + if (Test-Path $coverageFile) { + # Parse XML + [xml]$xml = Get-Content $coverageFile + + # Map source file IDs to Paths + $sourceMap = @{} + $xml.results.modules.module.source_files.source_file | ForEach-Object { + $sourceMap[$_.id] = $_.path + } + + $coveredLines = @() + + # Get covered ranges + if ($xml.results.modules.module.functions.function) { + foreach ($func in $xml.results.modules.module.functions.function) { + if ($func.ranges.range) { + foreach ($range in $func.ranges.range) { + if ($range.covered -eq "yes" -or $range.covered -eq "partially") { + $filePath = $sourceMap[$range.source_id] + $coveredLines += "$($filePath):$($range.start_line)-$($range.end_line)" + } + } + } + } + } + + $results[$test] = $coveredLines + Write-Host " Collected $($coveredLines.Count) covered ranges." + } else { + Write-Warning "No coverage file generated for $test" + } +} + +# Clean up +if (Test-Path $tempDir) { Remove-Item $tempDir -Recurse -Force } + +# Output to JSON +$json = $results | ConvertTo-Json -Depth 5 +Set-Content $Output $json +Write-Host "Analysis saved to $Output" + +# Perform basic overlap analysis +Write-Host "`nGenerating overlap summary..." 
+$testNames = $results.Keys | Sort-Object +foreach ($t1 in $testNames) { + $lines1 = $results[$t1] + if ($null -eq $lines1 -or $lines1.Count -eq 0) { continue } + $set1 = New-Object System.Collections.Generic.HashSet[string] + $lines1 | ForEach-Object { $set1.Add($_) | Out-Null } + + foreach ($t2 in $testNames) { + if ($t1 -ge $t2) { continue } # Avoid self and duplicate comparisons + + $lines2 = $results[$t2] + if ($null -eq $lines2 -or $lines2.Count -eq 0) { continue } + + $intersection = 0 + foreach ($line in $lines2) { + if ($set1.Contains($line)) { + $intersection++ + } + } + + $overlapPct1 = ($intersection / $lines1.Count) * 100 + $overlapPct2 = ($intersection / $lines2.Count) * 100 + + if ($overlapPct1 -gt 90 -and $overlapPct2 -gt 90) { + Write-Host "HIGH OVERLAP: $t1 <-> $t2" + Write-Host " Shared lines: $intersection" + Write-Host " $t1 overlap: $([math]::Round($overlapPct1, 2))%" + Write-Host " $t2 overlap: $([math]::Round($overlapPct2, 2))%" + Write-Host "" + } + } +} diff --git a/.github/skills/generate-mstest-filter/SKILL.md b/.github/skills/generate-mstest-filter/SKILL.md new file mode 100644 index 0000000000..8441823347 --- /dev/null +++ b/.github/skills/generate-mstest-filter/SKILL.md @@ -0,0 +1,233 @@ +--- +name: generate-mstest-filter +description: Generates well-formed MSTest filter expressions for dotnet test. Use this skill when asked to create a test filter, run specific tests, filter tests by name, class, category, or priority, or when the user describes tests they want to run selectively. +--- + +This skill generates MSTest filter expressions for use with `dotnet test --filter`. Use this when users describe tests they want to run and need a properly formatted filter expression. + +## When to Use This Skill + +- User asks to run specific tests by name or pattern +- User wants to filter tests by class name, namespace, or method +- User wants to run tests with specific categories or priorities +- User describes a set of tests to include or exclude +- User needs help with test filter syntax for MSTest + +## MSTest Filter Syntax Reference + +### Supported Properties + +| Property | Description | Supported By | +|----------|-------------|--------------| +| `FullyQualifiedName` | Full namespace + class + method name (e.g., `Namespace.Class.Method`) | MSTest, xUnit, NUnit | +| `DisplayName` | The display name of the test (often same as FullyQualifiedName for xUnit) | MSTest, xUnit, NUnit | +| `Name` | Test method name only | MSTest only (not xUnit) | +| `ClassName` | Full namespace + class name (must include namespace) | MSTest only (not xUnit) | +| `Priority` | Priority attribute value (integer) | MSTest (with `[Priority]` attribute) | +| `TestCategory` | TestCategory attribute value (string) | MSTest (with `[TestCategory]` attribute) | + +> **Important**: For xUnit tests (common in .NET Core projects), **always use `FullyQualifiedName` or `DisplayName`**. The `Name` and `ClassName` properties are not populated by xUnit and will result in no matches. 
+ +### Operators + +| Operator | Meaning | Example | +|----------|---------|---------| +| `=` | Exact match | `Name=TestMethod1` | +| `!=` | Not exact match | `Name!=TestMethod1` | +| `~` | Contains | `FullyQualifiedName~Connection` | +| `!~` | Does not contain | `FullyQualifiedName!~Integration` | + +### Boolean Operators + +| Operator | Meaning | Example | +|----------|---------|---------| +| `\|` | OR | `FullyQualifiedName~Test1\|FullyQualifiedName~Test2` | +| `&` | AND | `FullyQualifiedName~MyClass&Priority=1` | +| `()` | Grouping | `(FullyQualifiedName~Test1\|FullyQualifiedName~Test2)&Priority=1` | + +## Instructions + +1. **Analyze the user's description** to identify: + - Test names, patterns, or keywords mentioned + - Class names or namespaces referenced + - Categories or priorities specified + - Whether tests should be included or excluded + - Whether the user referenced a **file name** (look for `.cs` extension or file path patterns) + +2. **Handle file name inputs**: + - If the user provides a file name (e.g., `ChannelDbConnectionPoolTest.cs`), extract the class name by removing the `.cs` extension + - File names typically correspond to the test class name (e.g., `SqlConnectionTest.cs` → class `SqlConnectionTest`) + - Use `FullyQualifiedName~ClassName` pattern for file-based inputs + +3. **Choose the appropriate property**: + - **For xUnit tests (most .NET Core projects)**: Always use `FullyQualifiedName` or `DisplayName` + - **For MSTest only**: `Name` and `ClassName` properties are also available + - Use `FullyQualifiedName~` with contains operator for maximum compatibility + - Use `TestCategory` for category-based filtering (MSTest only) + - Use `Priority` for priority-based filtering (MSTest only) + +4. **Select the correct operator**: + - Use `~` (contains) for partial matches and patterns - **this is the safest default** + - Use `=` for exact matches only when you know the full value + - Use `!=` or `!~` for exclusions + +5. **Combine conditions** as needed: + - Use `|` (OR) when any condition should match + - Use `&` (AND) when all conditions must match + - Use parentheses `()` for complex logic + +6. **Format the output** as a complete `dotnet test` command: + ``` + dotnet test --filter "" + ``` + +7. **Handle special characters**: + - Escape `!` with `\!` on Linux/macOS shells + - Use `%2C` for commas in generic type parameters + - URL-encode special characters in Name/DisplayName values + +## Examples + +### Example 1: Run tests containing a keyword +**User says**: "Run all connection tests" +```bash +dotnet test --filter "FullyQualifiedName~Connection" +``` + +### Example 2: Run tests in a specific class (xUnit compatible) +**User says**: "Run tests in SqlConnectionTest class" +```bash +dotnet test --filter "FullyQualifiedName~SqlConnectionTest" +``` +> **Note**: Avoid using `ClassName=` for xUnit tests - it won't work. Always use `FullyQualifiedName~` for cross-framework compatibility. + +### Example 3: Run tests from a specific file +**User says**: "Run tests in ChannelDbConnectionPoolTest.cs" +```bash +dotnet test --filter "FullyQualifiedName~ChannelDbConnectionPoolTest" +``` +> Strip the `.cs` extension and use `FullyQualifiedName~` with the class name. + +### Example 4: Run a specific test method +**User says**: "Run the TestOpenConnection test" +```bash +dotnet test --filter "FullyQualifiedName~TestOpenConnection" +``` +> Use `FullyQualifiedName~` instead of `Name=` for xUnit compatibility. 
### Example 5: Run tests by category (MSTest only) +**User says**: "Run all tests in CategoryA" +```bash +dotnet test --filter "TestCategory=CategoryA" +``` + +### Example 6: Run high priority tests (MSTest only) +**User says**: "Run priority 1 tests" +```bash +dotnet test --filter "Priority=1" +``` + +### Example 7: Combine multiple conditions (AND) +**User says**: "Run connection tests that are priority 1" +```bash +dotnet test --filter "FullyQualifiedName~Connection&Priority=1" +``` + +### Example 8: Combine multiple conditions (OR) +**User says**: "Run tests for SqlConnection or SqlCommand" +```bash +dotnet test --filter "FullyQualifiedName~SqlConnection|FullyQualifiedName~SqlCommand" +``` + +### Example 9: Exclude tests +**User says**: "Run all tests except integration tests" +```bash +dotnet test --filter "FullyQualifiedName!~Integration" +``` + +### Example 10: Complex filter with grouping +**User says**: "Run connection or command tests that are in CategoryA" +```bash +dotnet test --filter "(FullyQualifiedName~Connection|FullyQualifiedName~Command)&TestCategory=CategoryA" +``` + +### Example 11: Exclude specific test method +**User says**: "Run all tests except TestSlowOperation" +```bash +dotnet test --filter "FullyQualifiedName!~TestSlowOperation" +``` + +### Example 12: Multiple exclusions +**User says**: "Run tests but skip integration and performance tests" +```bash +dotnet test --filter "FullyQualifiedName!~Integration&FullyQualifiedName!~Performance" +``` + +## Verification (Required) + +**Always verify the generated filter before presenting it to the user.** Use the `--list-tests` flag to confirm the filter matches the expected tests: + +```bash +dotnet test --list-tests --filter "<filter expression>" --framework <framework> +``` + +### Verification Steps + +1. **Run the list-tests command** with the generated filter +2. **Check the output**: + - If tests are listed → filter is valid + - If "No test matches the given testcase filter" → filter is invalid, needs adjustment +3. **If no matches**, try these fixes in order: + - Switch from `ClassName=` or `Name=` to `FullyQualifiedName~` + - Remove the namespace prefix and use just the class/method name with `~` + - Try `DisplayName~` as an alternative +4. **Re-run verification** after any changes + +### Example Verification + +```bash +# Generate filter for "ChannelDbConnectionPoolTest" class +dotnet test tests/UnitTests/UnitTests.csproj --list-tests --filter "FullyQualifiedName~ChannelDbConnectionPoolTest" --framework net9.0 + +# Expected output shows matching tests: +# The following Tests are available: +# Microsoft.Data.SqlClient.UnitTests.ConnectionPool.ChannelDbConnectionPoolTest.GetConnectionEmptyPool_ShouldCreateNewConnection(...) +# ... 
+``` + +## Error Handling + +- If the user's description is ambiguous, ask for clarification about: + - Whether they want exact match or contains + - The full class name or namespace if needed + - Whether conditions should be AND or OR + +- **If a filter returns no matches**: + - First, verify the test class/method exists in the project + - Switch to `FullyQualifiedName~` with contains operator + - Check if the project uses xUnit (common in .NET Core) - if so, avoid `Name` and `ClassName` properties + +- For complex filters, validate that parentheses are balanced + +## Additional Tips + +- An expression without any operator is interpreted as `FullyQualifiedName~` + - Example: `dotnet test --filter xyz` equals `dotnet test --filter "FullyQualifiedName~xyz"` + +- All lookups are case-insensitive + +- When running on Linux/macOS, escape `!` with backslash: `\!` + +- For project-specific tests, add the project path: + ```bash + dotnet test path/to/project.csproj --filter "FullyQualifiedName~MyTest" + ``` + +## Common Pitfalls + +| Problem | Cause | Solution | +|---------|-------|----------| +| No test matches filter | Using `Name=` or `ClassName=` with xUnit | Use `FullyQualifiedName~` instead | +| No test matches filter | Using just class name without namespace in `ClassName=` | Use `FullyQualifiedName~ClassName` | +| Filter matches too many tests | Using overly broad `~` pattern | Add more specific qualifiers or use `&` with additional conditions | +| TestCategory filter doesn't work | Project uses xUnit, which doesn't support TestCategory | Use `[Trait]` attributes with xUnit and filter by trait name | From 7a5ae5855961ab52acca39c63074aaa595091aa0 Mon Sep 17 00:00:00 2001 From: Malcolm Daigle Date: Mon, 9 Feb 2026 11:01:07 -0800 Subject: [PATCH 2/4] Add dotnet-coverage to tool manifest. --- dotnet-tools.json | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 dotnet-tools.json diff --git a/dotnet-tools.json b/dotnet-tools.json new file mode 100644 index 0000000000..69ba4502e5 --- /dev/null +++ b/dotnet-tools.json @@ -0,0 +1,13 @@ +{ + "version": 1, + "isRoot": true, + "tools": { + "dotnet-coverage": { + "version": "18.3.2", + "commands": [ + "dotnet-coverage" + ], + "rollForward": false + } + } +} \ No newline at end of file From 32a8802a58ea525d3c105d390bf05a3c8c1f2757 Mon Sep 17 00:00:00 2001 From: Malcolm Daigle Date: Tue, 10 Feb 2026 08:33:19 -0800 Subject: [PATCH 3/4] Add mcp file. 
--- .gitignore | 3 ++- .vscode/mcp.json | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 .vscode/mcp.json diff --git a/.gitignore b/.gitignore index bb6f58f9bd..36843668bd 100644 --- a/.gitignore +++ b/.gitignore @@ -41,7 +41,8 @@ Generated\ Files/ **/.AssemblyAttributes # Visual Studio Code settings -.vscode/ +.vscode/* +!.vscode/mcp.json # MSTest test Results [Tt]est[Rr]esult*/ diff --git a/.vscode/mcp.json b/.vscode/mcp.json new file mode 100644 index 0000000000..570f425ca4 --- /dev/null +++ b/.vscode/mcp.json @@ -0,0 +1,32 @@ +{ + "servers": { + "eng-copilot": { + "url": "https://sql-mcp.eng-copilot.net/default/mcp/", + "type": "http" + }, + "icm": { + "url": "https://icm-mcp-prod.azure-api.net/v1/", + "type": "http" + }, + "github": { + "type": "http", + "url": "https://api.githubcopilot.com/mcp/" + }, + "ado": { + "type": "stdio", + "command": "npx", + "args": [ + "-y", + "@azure-devops/mcp", + "${input:ado_org}" + ] + } + }, + "inputs": [ + { + "id": "ado_org", + "type": "promptString", + "description": "Azure DevOps organization name (e.g. 'contoso')" + } + ] +} \ No newline at end of file From 765544d53e8e2f6a08ae1121f7cc42c097fa207a Mon Sep 17 00:00:00 2001 From: Malcolm Daigle Date: Wed, 11 Feb 2026 11:26:36 -0800 Subject: [PATCH 4/4] Review changes. --- .../prompts/scripts/AnalyzeTestOverlap.ps1 | 28 +++++++++++++++++-- .vscode/mcp.json | 23 ++++++++------- 2 files changed, 36 insertions(+), 15 deletions(-) diff --git a/.github/prompts/scripts/AnalyzeTestOverlap.ps1 b/.github/prompts/scripts/AnalyzeTestOverlap.ps1 index d888824196..17f1916161 100644 --- a/.github/prompts/scripts/AnalyzeTestOverlap.ps1 +++ b/.github/prompts/scripts/AnalyzeTestOverlap.ps1 @@ -16,13 +16,35 @@ # Run for a specific set of tests (Recommended) .\AnalyzeTestOverlap.ps1 -Filter "FullyQualifiedName~ConnectionEnhancedRoutingTests" +.PARAMETER Filter + The dotnet test filter expression used to select which tests to analyze. + Supports MSTest filter syntax (e.g., "FullyQualifiedName~ClassName" or + "TestCategory=Unit"). Use "*" to include all tests (warning: very slow as + each test runs in a separate process). Default is + "" (analyze all tests). + +.PARAMETER Framework + The target framework moniker (TFM) to build and run tests against. Must + match a valid in the test project (e.g., "net462", + "net9.0"). Default is "net462". + +.PARAMETER Project + The relative path to the test project (.csproj) to analyze. Default is + the SqlClient unit tests project. + +.PARAMETER Output + The file path where the JSON coverage analysis report will be saved. The + report maps each test method to the source file lines it covers. Default + is "test-coverage-analysis.json". + .EXAMPLE # Run for all tests (Warning: Slow, as it runs each test in a separate process) .\AnalyzeTestOverlap.ps1 -Filter "*" #> -param( - [string]$Filter = "FullyQualifiedName~ConnectionEnhancedRoutingTests", +param +( + [string]$Filter = "", [string]$Framework = "net462", [string]$Project = "src\Microsoft.Data.SqlClient\tests\UnitTests\Microsoft.Data.SqlClient.UnitTests.csproj", [string]$Output = "test-coverage-analysis.json" @@ -69,7 +91,7 @@ New-Item -ItemType Directory -Path $tempDir | Out-Null foreach ($test in $uniqueTestMethods) { Write-Host "Running $test..." 
# Sanitize test name for filename - $safeTestName = $test -replace '[^a-zA-Z0-9\._-]', '_' + $safeTestName = $test -replace '[^a-zA-Z0-9\.-]', '_' $coverageFile = Join-Path $tempDir "$safeTestName.xml" # Run test with coverage diff --git a/.vscode/mcp.json b/.vscode/mcp.json index 570f425ca4..89a091dc1a 100644 --- a/.vscode/mcp.json +++ b/.vscode/mcp.json @@ -1,8 +1,14 @@ { "servers": { - "eng-copilot": { - "url": "https://sql-mcp.eng-copilot.net/default/mcp/", - "type": "http" + "bluebird-mcp-sqlclient": { + "url": "https://mcp.bluebird-ai.net/", + "type": "http", + "headers": { + "x-mcp-ec-organization": "SqlClientDrivers", + "x-mcp-ec-project": "ADO.NET", + "x-mcp-ec-repository": "dotnet-sqlclient", + "x-mcp-ec-branch": "internal/main" + } }, "icm": { "url": "https://icm-mcp-prod.azure-api.net/v1/", @@ -18,15 +24,8 @@ "args": [ "-y", "@azure-devops/mcp", - "${input:ado_org}" + "SqlClientDrivers" ] } - }, - "inputs": [ - { - "id": "ado_org", - "type": "promptString", - "description": "Azure DevOps organization name (e.g. 'contoso')" - } - ] + } } \ No newline at end of file