const fs = require('fs');
const path = require('path');
const { v4: uuidv4 } = require('uuid');
const mime = require('mime-types');
const yargs = require('yargs/yargs');
const { hideBin } = require('yargs/helpers');

// --- Configuration & Defaults ---
// Default values used if not provided via command line arguments or parsed from the HTML.
const DEFAULT_HTML_FILE = 'artillery_report.html';
const DEFAULT_APP_NAME = 'Default Application';
const DEFAULT_RUN_ID = `RunID_${Date.now()}`;
const outputDir = './allure-results'; // Standard directory for Allure-compatible JSON files.

// --- Parse Command-Line Arguments ---
// Uses yargs to define and parse CLI arguments for file path, app name, and run ID.
const argv = yargs(hideBin(process.argv))
  .option('html', {
    alias: 'f',
    type: 'string',
    description: 'Path to the input HTML report file',
    default: DEFAULT_HTML_FILE
  })
  .option('appName', {
    alias: 'a',
    type: 'string',
    description: 'Override Application Name (Parent Suite)'
  })
  .option('runId', {
    alias: 'r',
    type: 'string',
    description: 'Override Run ID (Suite Name)'
  })
  .help()
  .argv;

// --- Core Logic: Allure Metadata ---

/**
 * Writes the executor.json file to provide a meaningful report title in the Allure header.
 * This fixes the "unknown" display on the Allure Overview page by providing build info.
 * @param {string} outputDir - The directory to save the JSON file.
 * @param {string} appName - The name of the application.
 * @param {string} runId - The unique ID for the test run.
 */
function writeExecutorInfo(outputDir, appName, runId) {
  const executorPath = path.join(outputDir, 'executor.json');
  const executorData = {
    "name": "Artillery to Allure Converter",
    "type": "performance-tool",
    "url": "https://artillery.io/",
    "buildName": `${appName} (${runId})`, // Name displayed in the Allure header
    "reportName": `Performance Test Report: ${appName}`,
    "reportUrl": ""
  };
  // Writes the executor JSON file to the allure-results directory.
  fs.writeFileSync(executorPath, JSON.stringify(executorData, null, 2), 'utf8');
}

// --- Core Logic: HTML Parsing & Utilities ---

/**
 * Converts a human-readable duration string (e.g., "1h 1m 30s") to milliseconds.
 * This is used to calculate the start and stop times for the overall summary test case.
 * @param {string} durationStr - The duration string from the HTML report.
 * @returns {number} Total duration in milliseconds.
 */
function parseDurationToMs(durationStr) {
  if (!durationStr) return 0;
  let totalMs = 0;
  // Regex matches hours (e.g., 1h), minutes (e.g., 1m), seconds (e.g., 30s)
  const parts = durationStr.match(/(\d+)([hms])/g);
  if (parts) {
    parts.forEach(part => {
      const num = parseInt(part.slice(0, -1), 10);
      const unit = part.slice(-1);
      if (unit === 'h') {
        totalMs += num * 3600 * 1000;
      } else if (unit === 'm') {
        totalMs += num * 60 * 1000;
      } else if (unit === 's') {
        totalMs += num * 1000;
      }
    });
  }
  return totalMs;
}
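// Example values, assuming the 'Duration' header field uses the h/m/s format matched above:
//   parseDurationToMs('1h 1m 30s') -> 1*3600000 + 1*60000 + 30*1000 = 3690000 ms
//   parseDurationToMs('45s')       -> 45000 ms
//   parseDurationToMs('')          -> 0 ms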
/**
 * Parses the HTML for AppName, RunID, and initial header metrics using specific HTML structure regex.
 * It prioritizes CLI arguments over values found in the HTML.
 * @param {string} htmlContent - The full content of the HTML report.
 * @param {object} args - The command line arguments.
 * @returns {object} Contains the final appName, runId, and all collected header metrics.
 */
function parseHeaderInfo(htmlContent, args) {
  let appName = args.appName || DEFAULT_APP_NAME;
  let runId = args.runId || DEFAULT_RUN_ID;

  // Regex to find all key-value pairs in the initial header card (e.g., <strong>Key:</strong><div>Value</div>)
  const headerMetricRegex = /<strong>([^<]+):<\/strong><div>([^<]+)<\/div>/g;
  const headerData = {};
  let match;
  while ((match = headerMetricRegex.exec(htmlContent)) !== null) {
    const key = match[1].trim();
    const value = match[2].trim();
    headerData[key] = value;
  }

  // Assign parsed values, prioritizing CLI args
  appName = args.appName || headerData['App Name'] || DEFAULT_APP_NAME;
  // Use the Start time to generate a unique run ID if not provided, ensuring it's safe for file names.
  const startTime = headerData['Start'] || 'Unknown_Start_Time';
  runId = args.runId || `TestRun_${startTime.replace(/[^a-zA-Z0-9]/g, '_')}`;

  // Return all collected header data along with appName and runId
  return { appName, runId, headerData };
}

/**
 * Programmatically extracts and formats Overall Test Metrics into a concise, multi-column HTML table.
 * This HTML table will be used as the description for the "Overall Performance Summary" Allure test case.
 * @param {string} htmlContent - The full content of the HTML report.
 * @param {string} runId - The run ID.
 * @param {string} appName - The application name.
 * @param {object} headerData - The metrics parsed from the header.
 * @returns {string} The HTML string for the professional metrics table.
 */
function formatOverallMetrics(htmlContent, runId, appName, headerData) {
  // 1. Initialize data with header information
  const data = {
    'Application Name': appName,
    'Test Run ID': runId,
  };
  // Merge essential header data
  Object.assign(data, headerData);

  // 2. Extract key-value pairs from the Single-Digit Metrics card
  // This regex attempts to isolate the section containing the main single-digit metrics.
  const metricCardRegex = /<h5 class='mb-3'>š Overall Single-Digit Metrics[\s\S]*?(<h5 class='mb-3'>š Transaction Summary Table)/s;
  const metricCardMatch = htmlContent.match(metricCardRegex);
  // NOTE: metricCardHtml is not referenced below; the metric regex that follows scans the full document.
  const metricCardHtml = metricCardMatch ? metricCardMatch[0].replace(metricCardMatch[1], '').trim() : '';

  // Extract individual metrics using class names
  const singleMetricRegex = /<div class='key-metric-label'>([^<]+)<\/div>\s*<div class='key-metric-value'>([^<]+)<\/div>/g;
  let match;
  while ((match = singleMetricRegex.exec(htmlContent)) !== null) {
    const key = match[1].trim();
    let value = match[2].trim();

    // Clean up key names for better display and consistency
    const cleanedKey = key
      .replace(/Overall Avg RT \(Weighted Mean\)/, 'Overall Avg RT (P50)')
      .replace(/Avg RPS \(Throughput\)/, 'Avg RPS')
      .replace(/ \(Worst Case\)/g, '');

    // Handle "Total Requests" vs "Total Transactions" for consistency
    if (cleanedKey === 'Total Requests') {
      value = data['Total Transactions'] || value;
    }
    data[cleanedKey] = value;
  }

  // 3. Define the concise grouping and display order for the final HTML table
  const groups = [
    {
      title: 'General Information & Scope',
      keys: ['Application Name', 'Test Run ID', 'Start', 'End', 'Duration', 'Total Requests'],
    },
    {
      title: 'Overall Response Time (ms)',
      keys: ['Overall Avg RT (P50)', 'Overall P90 RT', 'Overall P95 RT', 'Max RT (Test Max)'],
    },
    {
      title: 'Throughput & Success Response Time (ms)',
      keys: ['Avg RPS', 'Max RPS (Peak Rate)', '2xx Avg RT (P50)', '2xx P95 RT'],
    }
  ];
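  // Illustrative rendering of the table built below (values are placeholders, not real data):
  //   | General Information & Scope                                   |
  //   | Application Name | <appName> | Test Run ID | <runId>          |
  //   | Start            | <start>   | End         | <end>            |
  // Each group title spans all four columns; metric rows hold two key/value pairs side by side.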
  // 4. Generate Professional Multi-Column HTML Table (2 metrics side-by-side per row)
  // The styles are inline to ensure they render correctly within the Allure report description.
  const keyStyle = `font-weight: 500; color: #555; width: 25%; padding: 6px 10px; border-right: 1px solid #eee;`;
  const valueStyle = `font-weight: 700; width: 25%; padding: 6px 10px;`;
  const headerStyle = `background-color: #eef2f5; font-weight: 700; padding: 8px 10px; border-bottom: 2px solid #ddd;`;

  let html = `
    <h3 style="margin-top: 20px;">Overall Performance Metrics</h3>
    <table style="width:100%; border-collapse: collapse; font-size: 14px; text-align: left; border: 1px solid #ddd; border-radius: 8px; overflow: hidden;">
      <tbody>
  `;

  groups.forEach(group => {
    // Group Title Row
    html += `
      <tr style="border-top: 1px solid #ddd;">
        <td colspan="4" style="${headerStyle}">${group.title}</td>
      </tr>
    `;

    const validKeys = group.keys.filter(key => data[key] !== undefined);
    const totalItems = validKeys.length;

    // Loop through keys, putting two key/value pairs in each row
    for (let i = 0; i < totalItems; i += 2) {
      const key1 = validKeys[i];
      const key2 = validKeys[i + 1];

      html += `<tr>`;
      // Metric 1 (Key | Value)
      html += `<td style="${keyStyle}">${key1}</td>`;
      html += `<td style="${valueStyle}">${data[key1]}</td>`;
      // Metric 2 (Key | Value) - only if it exists
      if (key2) {
        html += `<td style="${keyStyle}">${key2}</td>`;
        html += `<td style="${valueStyle}">${data[key2]}</td>`;
      } else {
        // Fill remaining space if the last row has only one item
        html += `<td colspan="2" style="border: none;"></td>`;
      }
      html += `</tr>`;
    }
  });

  html += `</tbody></table>`;
  return html;
}

/**
 * Parses the Transaction Summary Table from the HTML content.
 * It extracts key metrics and determines the Allure status (passed, failed, broken)
 * for each transaction based on P95 SLA check, TPH check, and error count.
 * @param {string} htmlContent - The full content of the HTML report.
 * @returns {Array<object>} An array of transaction metric objects.
 */
function parseTransactionSummary(htmlContent) {
  const metrics = [];
  // 1. Locate and Extract Transaction Summary Table Body using regex
  const tableRegex = /<table[^>]*>[\s\S]*?<thead>[\s\S]*?<\/thead>[\s\S]*?<tbody>([\s\S]*?)<\/tbody>[\s\S]*?<\/table>/;
  const tableMatch = htmlContent.match(tableRegex);
  if (!tableMatch || !tableMatch[1]) {
    return metrics;
  }
  const tableBodyHtml = tableMatch[1];

  // Split into rows and then parse cells in each row
  const rows = tableBodyHtml.trim().split('</tr>').filter(row => row.includes('<td')).map(row => row.trim());
  const cellRegex = /<td[^>]*>(.*?)<\/td>/g;

  rows.forEach(row => {
    const cells = [];
    let match;
    // Extract all cell contents
    while ((match = cellRegex.exec(row)) !== null) {
      cells.push(match[1].trim());
    }

    // Ensure we have 13 columns for the transaction metrics (indices 0 to 12)
    if (cells.length >= 13) {
      const trxName = cells[0];
      const slaStatus = cells[12];
      const slaP95 = parseFloat(cells[1]);
      const p95Actual = parseFloat(cells[6]);
      const expectedTph = parseInt(cells[8], 10); // Expected_TPH is at index 8
      const totalCount = parseInt(cells[9], 10);  // Total Count is at index 9
      const failCount = parseInt(cells[11], 10);

      let status = 'passed';
      let statusDetails = '';
      let failedChecks = []; // Array to store all reasons for failure/broken status

      // --- SLA Check 1: P95 Response Time ---
      if (slaStatus.toLowerCase().includes('not met')) {
        // P95 SLA Breach = FAILED
        status = 'failed';
        failedChecks.push(`P95 RT (${p95Actual.toFixed(1)}ms) EXCEEDED SLA (${slaP95.toFixed(1)}ms)`);
      }

      // --- SLA Check 2: Throughput (Expected TPH vs. Actual Count) ---
      // If Expected_TPH is set (> 0) and the Actual Count is lower than expected, it's a failure.
      if (expectedTph > 0 && totalCount < expectedTph) {
        // Throughput SLA Breach = FAILED
        status = 'failed'; // Elevate status to failed
        failedChecks.push(`Throughput NOT MET! Actual Count (${totalCount}) < Expected TPH (${expectedTph})`);
      }

      // --- Error Check: Transactions that failed for other reasons ---
      if (failCount > 0 && status !== 'failed') {
        // Errors present, but SLAs met = BROKEN (Warning status)
        // 'Broken' is used for an issue that isn't a direct test failure (like an exception/error)
        // This is only set if the status isn't already 'failed' from an SLA breach.
        status = 'broken';
        failedChecks.push(`WARNING! ${failCount} errors reported.`);
      }

      if (failedChecks.length > 0) {
        // Combine all failure/warning reasons into the final status message
        statusDetails = `${trxName}: FAILED/BROKEN due to: ${failedChecks.join('; ')}`;
      } else {
        statusDetails = `${trxName}: Passed All Checks.`;
      }

      metrics.push({
        trxName: trxName,
        slaP95: slaP95.toFixed(1),
        p50: parseFloat(cells[2]).toFixed(1),
        min: parseFloat(cells[3]).toFixed(1),
        max: parseFloat(cells[4]).toFixed(1),
        p90: parseFloat(cells[5]).toFixed(1),
        p95: p95Actual.toFixed(1),
        expectedTph: expectedTph, // New TPH metric
        totalCount: totalCount,   // Total transactions processed
        passCount: parseInt(cells[10], 10),
        failCount: failCount,
        slaStatusText: slaStatus,
        status: status, // Final status based on all checks
        statusDetails: statusDetails
      });
    }
  });

  return metrics;
}
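// Column layout assumed by parseTransactionSummary (indices into `cells`), as inferred from the usage above:
//   [0] TrxName, [1] SLA P95 (ms), [2] P50, [3] Min, [4] Max, [5] P90, [6] P95 (actual),
//   [8] Expected TPH, [9] Total Count, [10] Pass Count, [11] Fail Count, [12] SLA Status text.
// Example: a row whose SLA Status cell reads "Not Met" is marked 'failed';
// a row with SLAs met but Fail Count > 0 is marked 'broken'.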
/**
 * Generates the Allure Transaction Summary Table HTML with SLA Status at the end.
 * This table will be part of the "Overall Performance Summary" description in Allure.
 * @param {Array<object>} finalMetrics - The array of parsed transaction metric objects.
 * @returns {string} The HTML string for the transaction summary table.
 */
function generateSummaryTableHtml(finalMetrics) {
  let html = `
    <h3 style="margin-top: 20px;">Transaction Summary Table</h3>
    <table style="width:100%; border-collapse: collapse; font-size: 14px; text-align: center;">
      <thead style="background-color:#f2f2f2;">
        <tr>
          <th style="border: 1px solid #ddd; padding: 8px; text-align: left;">TrxName</th>
          <th style="border: 1px solid #ddd; padding: 8px;">P50 RT (ms)</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Min RT (ms)</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Max RT (ms)</th>
          <th style="border: 1px solid #ddd; padding: 8px;">P90 RT (ms)</th>
          <th style="border: 1px solid #ddd; padding: 8px;">P95 (ms)</th>
          <th style="border: 1px solid #ddd; padding: 8px;">SLA (P95 ms)</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Expected TPH</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Total Trx</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Pass Trx</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Fail Trx</th>
          <th style="border: 1px solid #ddd; padding: 8px;">Status</th>
        </tr>
      </thead>
      <tbody>
  `;

  finalMetrics.forEach(m => {
    // Define color styles based on overall status (combines both SLA checks)
    const statusColor =
      m.status === 'passed' ? 'background-color: #d4edda; color: #155724;' :
      m.status === 'failed' ? 'background-color: #f8d7da; color: #721c24;' :
      'background-color: #fff3cd; color: #856404;'; // broken status is yellow

    // Check if TPH was breached for highlighting
    const isTphBreached = m.expectedTph > 0 && m.totalCount < m.expectedTph;

    html += `
      <tr>
        <td style="border: 1px solid #ddd; padding: 8px; text-align: left;">${m.trxName}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.p50}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.min}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.max}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.p90}</td>
        <td style="border: 1px solid #ddd; padding: 8px; ${m.slaStatusText === 'Not Met' ? 'font-weight: bold; color: red;' : ''}">${m.p95}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.slaP95}</td>
        <td style="border: 1px solid #ddd; padding: 8px; ${isTphBreached ? 'font-weight: bold; color: red;' : ''}">${m.expectedTph}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.totalCount}</td>
        <td style="border: 1px solid #ddd; padding: 8px;">${m.passCount}</td>
        <td style="border: 1px solid #ddd; padding: 8px; ${m.failCount > 0 ? 'font-weight: bold; color: red;' : ''}">${m.failCount}</td>
        <td style="border: 1px solid #ddd; padding: 8px; ${statusColor}">${m.status.toUpperCase()}</td>
      </tr>
    `;
  });

  html += `</tbody></table>`;
  return html;
}

// --- Main execution logic ---
try {
  // 1. Ensure directories exist: outputDir for results, and attachmentsDir for the original HTML.
  const attachmentsDir = path.join(outputDir, 'attachments');
  [outputDir, attachmentsDir].forEach(dir => {
    if (!fs.existsSync(dir)) {
      fs.mkdirSync(dir, { recursive: true });
    }
  });

  // 2. Load and Parse HTML Report
  const INPUT_HTML_FILE = argv.html;
  if (!fs.existsSync(INPUT_HTML_FILE)) {
    throw new Error(`Input HTML report not found: ${INPUT_HTML_FILE}. Check the path or ensure the Artillery test ran successfully.`);
  }
  const rawHtml = fs.readFileSync(INPUT_HTML_FILE, 'utf8');

  // Parse App Name, Run ID, and initial header metrics from the HTML content
  const { appName, runId, headerData } = parseHeaderInfo(rawHtml, argv);
  // Parse transaction metrics, including the new TPH SLA checks
  const metrics = parseTransactionSummary(rawHtml);

  // --- Duration Logic for Overall Summary Test ---
  // The summary test should reflect the actual duration of the whole test run.
  const durationStr = headerData['Duration'] || '0s';
  const runDurationMs = parseDurationToMs(durationStr);
  // Calculate start time by subtracting the run duration from the current time (stop time).
  const overallStopTimeMs = Date.now();
  const overallStartTimeMs = overallStopTimeMs - runDurationMs;
  // -----------------------------------------------

  if (metrics.length === 0) {
    // Handle case where no transaction data could be parsed (e.g., test failed early or report format changed).
    const failureHtml = `<p style="color:red; font-weight:bold;">ERROR: No transaction summary table was found in the HTML report. The test may have failed to complete successfully.</p>`;
    const professionalOverallMetricsHtml = formatOverallMetrics(rawHtml, runId, appName, headerData);

    const summaryTest = {
      uuid: uuidv4(),
      name: `Overall Performance Summary`,
      fullName: `${appName}.${runId}-Summary`,
      status: 'broken', // Set to 'broken' as parsing failed
      stage: "finished",
      start: overallStartTimeMs,
      stop: overallStopTimeMs,
      descriptionHtml: professionalOverallMetricsHtml + failureHtml,
      labels: [
        { name: "parentSuite", value: appName },
        { name: "suite", value: runId },
        { name: "subSuite", value: "Summary" },
        { name: "feature", value: appName },
        { name: "story", value: runId },
      ],
      steps: [{ name: "Failed to parse transaction data.", status: "broken", stage: "finished" }],
      attachments: []
    };

    const testFileName = `${summaryTest.uuid}-result.json`;
    fs.writeFileSync(path.join(outputDir, testFileName), JSON.stringify(summaryTest, null, 2), 'utf8');
    console.log(`\n⚠️ Generated BROKEN Allure result. Could not parse Transaction Summary Table from HTML.`);
    // Exit early; a top-level return is valid inside a CommonJS module.
    return;
  }

  // Format the Overall Test Metrics using the new professional function
  const professionalOverallMetricsHtml = formatOverallMetrics(rawHtml, runId, appName, headerData);

  // 3. Prepare Attachment
  // Copy the original HTML report into the 'attachments' folder for direct linking in Allure.
  const htmlAttachmentName = path.basename(INPUT_HTML_FILE);
  const attachmentSourcePath = path.join(attachmentsDir, htmlAttachmentName);
  fs.copyFileSync(INPUT_HTML_FILE, attachmentSourcePath);

  // 4. Construct the Final Description HTML (Overall Metrics + Transaction Table)
  const finalDescriptionHtml = `
    ${professionalOverallMetricsHtml}
    ${generateSummaryTableHtml(metrics)}
    <hr>
    <p>For the full dashboard with all charts and detailed data, see the "Full Performance Dashboard (HTML)" attachment below.</p>
  `;

  // 5. Create Overall Summary Test Case
  // The overall status is 'failed' if *any* transaction metric is 'failed' (RT or TPH SLA breached).
  const overallStatus =
    metrics.some(m => m.status === 'failed') ? 'failed' :
    metrics.some(m => m.status === 'broken') ? 'broken' :
    'passed';
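  // For example, transaction statuses ['passed', 'broken', 'failed'] roll up to 'failed',
  // ['passed', 'broken'] to 'broken', and ['passed', 'passed'] to 'passed'.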
  const summaryTest = {
    uuid: uuidv4(),
    name: `Overall Performance Summary`,
    fullName: `${appName}.${runId}-Summary`,
    status: overallStatus,
    stage: "finished",
    start: overallStartTimeMs, // Uses actual run start
    stop: overallStopTimeMs,   // Uses actual run stop
    descriptionHtml: finalDescriptionHtml, // Contains the formatted metrics and table
    labels: [
      // Allure hierarchy labels
      { name: "parentSuite", value: appName },
      { name: "suite", value: runId },
      { name: "subSuite", value: "Overall" },
      // Labels for Allure filter grouping
      { name: "feature", value: appName },
      { name: "story", value: runId },
      { name: "epic", value: "Performance Dashboard" },
    ],
    // Converts each transaction into a 'step' within the overall summary test case.
    steps: metrics.map(m => {
      return {
        name: `${m.trxName} | P95: ${m.p95}ms (SLA: ${m.slaP95}ms) | TPH: ${m.totalCount} (Exp: ${m.expectedTph})`,
        status: m.status, // Uses transaction status
        stage: "finished",
        statusDetails: { message: m.statusDetails }
      };
    }),
    attachments: [
      // Link to the original HTML report
      {
        name: "Full Performance Dashboard (HTML)",
        type: mime.lookup('html'),
        source: `attachments/${htmlAttachmentName}`
      },
    ]
  };

  // 6. Create Individual Transaction Test Cases
  // Each transaction is represented as a separate Allure test result.
  const transactionTests = metrics.map(m => {
    // --- Synthetic Duration Logic ---
    // P90 response time is used as a synthetic duration to represent the typical latency.
    const p90RtMs = parseFloat(m.p90);
    // Set the 'start' time to the current timestamp and calculate 'stop' time using P90 RT.
    const synthStartTime = Date.now();
    const synthStopTime = Math.round(synthStartTime + p90RtMs);

    // --- Metric Value for Display ---
    const labelValue = `P90: ${m.p90}ms | TPH: ${m.totalCount} / ${m.expectedTph}`;

    // --- Synthetic Class Name to satisfy Allure's strict format ---
    const syntheticClassName = `com.perftest.ArtilleryTransaction`;

    return {
      uuid: uuidv4(),
      name: `${m.trxName} (RT SLA:${m.slaP95}ms, TPH SLA:${m.expectedTph})`,
      fullName: `${appName}.${runId}-${m.trxName}`,
      status: m.status, // Uses individual transaction status (passed/failed/broken)
      stage: "finished",
      start: synthStartTime,
      stop: synthStopTime,
      // Simple description using markdown (Updated to include TPH)
      description: `**P95 Response Time SLA**: ${m.p95}ms (SLA: ${m.slaP95}ms)\n**Throughput SLA**: Actual ${m.totalCount} (Expected TPH: ${m.expectedTph})\n**Total Count**: ${m.totalCount}`,
      statusDetails: {
        message: m.statusDetails,
        trace: `P95 Status: ${m.slaStatusText}. TPH Check: ${m.totalCount >= m.expectedTph ? 'Met' : 'Not Met'}. Fail Count: ${m.failCount}.`
      },
      labels: [
        // Allure hierarchy labels for filtering
        { name: "parentSuite", value: appName },
        { name: "suite", value: runId },
        { name: "subSuite", value: "Transactions" },
        { name: "feature", value: appName },
        { name: "story", value: runId },
        { name: "package", value: appName },
        { name: "thread", value: labelValue },
        { name: "testClass", value: syntheticClassName },
      ],
      // Detailed steps to show all SLA and error checks
      steps: [
        {
          name: `Check P95 RT SLA: ${m.p95}ms <= ${m.slaP95}ms`,
          status: m.slaStatusText === 'Met' ? 'passed' : 'failed',
          stage: "finished"
        },
        {
          name: `Check Throughput SLA: Actual ${m.totalCount} >= Expected TPH ${m.expectedTph}`,
          // Status is failed if TPH is set (>0) and actual count is less than expected
          status: (m.expectedTph > 0 && m.totalCount < m.expectedTph) ? 'failed' : 'passed',
          stage: "finished"
        },
        {
          name: `Check Error Count: ${m.failCount} failed transactions`,
          status: m.failCount === 0 ? 'passed' : 'broken',
          stage: "finished"
        }
      ],
    };
  });

  // 7. Write Allure Result Files
  const allTests = [summaryTest, ...transactionTests];
  allTests.forEach(test => {
    const testFileName = `${test.uuid}-result.json`;
    // Write the result file for each test case
    fs.writeFileSync(path.join(outputDir, testFileName), JSON.stringify(test, null, 2), 'utf8');
  });

  // 8. Write Test Case Container and Executor Info
  // A container links test results to form a single suite/run in Allure.
  const container = {
    uuid: uuidv4(),
    name: runId,
    children: allTests.map(t => t.uuid) // List of all test result UUIDs
  };
  fs.writeFileSync(path.join(outputDir, `${container.uuid}-container.json`), JSON.stringify(container, null, 2), 'utf8');

  // Fixes the "unknown" display on the Allure Overview page
  writeExecutorInfo(outputDir, appName, runId);

  console.log(`\n✅ Successfully generated ${allTests.length} Allure result files for ${appName} (RunID: ${runId})`);
  console.log(`Test Duration: ${durationStr} (${runDurationMs}ms)`);
  console.log(`To view the report, run: allure serve ${outputDir}`);
} catch (e) {
  console.error(`\n❌ An error occurred during Allure report generation: ${e.message}`);
  process.exit(1);
}
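// Typical workflow (the script file name below is illustrative; adjust to your setup):
//   node artillery-to-allure.js --html ./artillery_report.html -a "My Application" -r "Nightly_Run_01"
//   allure serve ./allure-results        # requires the Allure CLI, as printed above
// The script writes one *-result.json per test case, one *-container.json grouping them,
// executor.json for the report header, and a copy of the HTML report under allure-results/attachments/.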