diff --git a/lib/metrics.go b/lib/metrics.go
index 24af9a2..a8ccb77 100644
--- a/lib/metrics.go
+++ b/lib/metrics.go
@@ -14,9 +14,9 @@ import (
 
 // Global metrics instance to track performance during load testing
 var metrics = PerformanceMetrics{
-    MinLatency:       time.Duration(math.MaxInt64),
-    ResponseCounters: make(map[int]int32),
-    RequestLatencies: make([]RequestMetric, 0),
+	MinLatency:       time.Duration(math.MaxInt64),
+	ResponseCounters: make(map[int]int32),
+	RequestLatencies: make([]RequestMetric, 0),
 }
 
 // UpdateMetrics records metrics for each individual request
@@ -25,52 +25,53 @@ var metrics = PerformanceMetrics{
 // - resp: HTTP response from the request
 // - second: elapsed seconds since the start of the test
 func UpdateMetrics(duration time.Duration, resp *http.Response, second int) {
-    metrics.Mu.Lock()
-    defer metrics.Mu.Unlock()
+	metrics.Mu.Lock()
+	defer metrics.Mu.Unlock()
 
-    // Create and store individual request metric
-    metric := RequestMetric{
-        Timestamp:  time.Now(),
-        Duration:   duration,
-        StatusCode: resp.StatusCode,
-        Verb:       resp.Request.Method,
-    }
-    metrics.RequestLatencies = append(metrics.RequestLatencies, metric)
+	// Create and store individual request metric
+	metric := RequestMetric{
+		Timestamp:  time.Now(),
+		Duration:   duration,
+		StatusCode: resp.StatusCode,
+		Verb:       resp.Request.Method,
+	}
+	metrics.RequestLatencies = append(metrics.RequestLatencies, metric)
 
-    // Update aggregate metrics
-    metrics.TotalRequests++
-    metrics.TotalLatency += duration
+	// Update aggregate metrics
+	metrics.TotalRequests++
+	metrics.TotalLatency += duration
 
-    // Update max/min latencies
-    if duration > metrics.MaxLatency {
-        metrics.MaxLatency = duration
-    }
-    if duration < metrics.MinLatency {
-        metrics.MinLatency = duration
-    }
+	// Update max/min latencies
+	if duration > metrics.MaxLatency {
+		metrics.MaxLatency = duration
+	}
+	if duration < metrics.MinLatency {
+		metrics.MinLatency = duration
+	}
 
-    // Track successful responses
-    if resp.StatusCode == http.StatusOK {
-        metrics.TotalResponses++
-        metrics.ResponseCounters[second]++
-    }
+	// Track successful responses
+	if resp.StatusCode == http.StatusOK {
+		metrics.TotalResponses++
+		metrics.ResponseCounters[second]++
+	}
 }
 
 // calculatePercentile calculates the nth percentile of latencies
 // Parameters:
 // - latencies: sorted slice of request durations
 // - percentile: desired percentile (e.g., 50 for p50, 95 for p95)
+//
 // Returns: the duration at the specified percentile
 func calculatePercentile(latencies []time.Duration, percentile float64) time.Duration {
-    if len(latencies) == 0 {
-        return 0
-    }
+	if len(latencies) == 0 {
+		return 0
+	}
 
-    index := int(math.Ceil((percentile/100.0)*float64(len(latencies)))) - 1
-    if index < 0 {
-        index = 0
-    }
-    return latencies[index]
+	index := int(math.Ceil((percentile/100.0)*float64(len(latencies)))) - 1
+	if index < 0 {
+		index = 0
+	}
+	return latencies[index]
 }
 
 // CalculateAndPrintMetrics generates and saves comprehensive test results
@@ -80,139 +81,137 @@ func calculatePercentile(latencies []time.Duration, percentile float64) time.Dur
 // - startTime: when the test started
 // - requestsPerSecond: target request rate
 // - endpoint: URL being tested
 // - patterns: request patterns used in the test
 func CalculateAndPrintMetrics(startTime time.Time, requestsPerSecond float64, endpoint string, patterns []RequestPattern) {
-    // Small delay to ensure all metrics are captured
-    time.Sleep(100 * time.Millisecond)
+	// Small delay to ensure all metrics are captured
+	time.Sleep(100 * time.Millisecond)
 
-    metrics.Mu.Lock()
-    defer metrics.Mu.Unlock()
+	metrics.Mu.Lock()
+	defer metrics.Mu.Unlock()
 
-    // Save detailed per-request metrics to CSV
-    saveDetailedMetrics()
+	// Prepare latencies for percentile calculations
+	latencies := make([]time.Duration, len(metrics.RequestLatencies))
+	for i, metric := range metrics.RequestLatencies {
+		latencies[i] = metric.Duration
+	}
+	sort.Slice(latencies, func(i, j int) bool {
+		return latencies[i] < latencies[j]
+	})
 
-    // Prepare latencies for percentile calculations
-    latencies := make([]time.Duration, len(metrics.RequestLatencies))
-    for i, metric := range metrics.RequestLatencies {
-        latencies[i] = metric.Duration
-    }
-    sort.Slice(latencies, func(i, j int) bool {
-        return latencies[i] < latencies[j]
-    })
+	// Calculate various percentiles
+	p50 := calculatePercentile(latencies, 50)
+	p95 := calculatePercentile(latencies, 95)
+	p99 := calculatePercentile(latencies, 99)
 
-    // Calculate various percentiles
-    p50 := calculatePercentile(latencies, 50)
-    p95 := calculatePercentile(latencies, 95)
-    p99 := calculatePercentile(latencies, 99)
+	// Calculate average latency
+	averageLatency := time.Duration(0)
+	if metrics.TotalRequests > 0 {
+		averageLatency = metrics.TotalLatency / time.Duration(metrics.TotalRequests)
+	}
 
-    // Calculate average latency
-    averageLatency := time.Duration(0)
-    if metrics.TotalRequests > 0 {
-        averageLatency = metrics.TotalLatency / time.Duration(metrics.TotalRequests)
-    }
+	// Calculate total duration and responses
+	totalDuration := time.Since(startTime).Seconds()
+	totalResponses := int32(0)
+	for _, count := range metrics.ResponseCounters {
+		totalResponses += count
+	}
 
-    // Calculate total duration and responses
-    totalDuration := time.Since(startTime).Seconds()
-    totalResponses := int32(0)
-    for _, count := range metrics.ResponseCounters {
-        totalResponses += count
-    }
+	// Reset min latency if unused
+	if metrics.MinLatency == time.Duration(math.MaxInt64) {
+		metrics.MinLatency = 0
+	}
 
-    // Reset min latency if unused
-    if metrics.MinLatency == time.Duration(math.MaxInt64) {
-        metrics.MinLatency = 0
-    }
+	// Build detailed results string
+	results := fmt.Sprintf("Load Test Report\n=============\n\n")
+	results += fmt.Sprintf("Endpoint: %s\n", endpoint)
+	results += fmt.Sprintf("Pattern: ")
 
-    // Build detailed results string
-    results := fmt.Sprintf("Load Test Report\n=============\n\n")
-    results += fmt.Sprintf("Endpoint: %s\n", endpoint)
-    results += fmt.Sprintf("Pattern: ")
+	// Format request patterns
+	for i, p := range patterns {
+		if i > 0 {
+			results += " → "
+		}
+		if p.Percentage > 0 && p.Percentage < 100 {
+			results += fmt.Sprintf("%.0f%%%s", p.Percentage, strings.ToLower(p.Verb[:1]))
+		} else {
+			results += fmt.Sprintf("%d%s", p.Sequence, strings.ToLower(p.Verb[:1]))
+		}
+	}
+	results += "\n\n"
 
-    // Format request patterns
-    for i, p := range patterns {
-        if i > 0 {
-            results += " → "
-        }
-        if p.Percentage > 0 && p.Percentage < 100 {
-            results += fmt.Sprintf("%.0f%%%s", p.Percentage, strings.ToLower(p.Verb[:1]))
-        } else {
-            results += fmt.Sprintf("%d%s", p.Sequence, strings.ToLower(p.Verb[:1]))
-        }
-    }
-    results += "\n\n"
+	// Add performance metrics
+	results += fmt.Sprintf("Performance Metrics\n")
+	results += fmt.Sprintf("-----------------\n")
+	results += fmt.Sprintf("Total Requests Sent: %d\n", metrics.TotalRequests)
+	results += fmt.Sprintf("Total Responses Received: %d\n", totalResponses)
+	results += fmt.Sprintf("Average Latency: %s\n", averageLatency)
+	results += fmt.Sprintf("Max Latency: %s\n", metrics.MaxLatency)
+	results += fmt.Sprintf("Min Latency: %s\n", metrics.MinLatency)
+	results += fmt.Sprintf("Requests/sec (Target): %.2f\n", requestsPerSecond)
+	results += fmt.Sprintf("Requests/sec (Actual): %.2f\n", float64(metrics.TotalRequests)/totalDuration)
+	results += fmt.Sprintf("Responses/sec: %.2f\n", float64(totalResponses)/totalDuration)
 
-    // Add performance metrics
-    results += fmt.Sprintf("Performance Metrics\n")
-    results += fmt.Sprintf("-----------------\n")
-    results += fmt.Sprintf("Total Requests Sent: %d\n", metrics.TotalRequests)
-    results += fmt.Sprintf("Total Responses Received: %d\n", totalResponses)
-    results += fmt.Sprintf("Average Latency: %s\n", averageLatency)
-    results += fmt.Sprintf("Max Latency: %s\n", metrics.MaxLatency)
-    results += fmt.Sprintf("Min Latency: %s\n", metrics.MinLatency)
-    results += fmt.Sprintf("Requests/sec (Target): %.2f\n", requestsPerSecond)
-    results += fmt.Sprintf("Requests/sec (Actual): %.2f\n", float64(metrics.TotalRequests)/totalDuration)
-    results += fmt.Sprintf("Responses/sec: %.2f\n", float64(totalResponses)/totalDuration)
+	// Add percentile information
+	results += fmt.Sprintf("\nLatency Percentiles\n")
+	results += fmt.Sprintf("-----------------\n")
+	results += fmt.Sprintf("50th percentile (p50): %s\n", p50)
+	results += fmt.Sprintf("95th percentile (p95): %s\n", p95)
+	results += fmt.Sprintf("99th percentile (p99): %s\n", p99)
 
-    // Add percentile information
-    results += fmt.Sprintf("\nLatency Percentiles\n")
-    results += fmt.Sprintf("-----------------\n")
-    results += fmt.Sprintf("50th percentile (p50): %s\n", p50)
-    results += fmt.Sprintf("95th percentile (p95): %s\n", p95)
-    results += fmt.Sprintf("99th percentile (p99): %s\n", p99)
-
-    fmt.Println(results)
-    saveReport(results)
+	fmt.Println(results)
+	saveReport(results)
+	saveDetailedMetrics()
 }
 
 // saveDetailedMetrics writes per-request metrics to a CSV file
 // The CSV includes timestamp, duration, status code, and request type for each request
 func saveDetailedMetrics() {
-    resultsDir := ".reports"
-    os.MkdirAll(resultsDir, os.ModePerm)
+	resultsDir := ".reports"
+	os.MkdirAll(resultsDir, os.ModePerm)
 
-    csvFile := filepath.Join(resultsDir, fmt.Sprintf("detailed_metrics_%d.csv", time.Now().Unix()))
-    file, err := os.Create(csvFile)
-    if err != nil {
-        fmt.Println("Error creating CSV file:", err)
-        return
-    }
-    defer file.Close()
+	csvFile := filepath.Join(resultsDir, fmt.Sprintf("detailed_metrics_%d.csv", time.Now().Unix()))
+	file, err := os.Create(csvFile)
+	if err != nil {
+		fmt.Println("Error creating CSV file:", err)
+		return
+	}
+	defer file.Close()
 
-    writer := csv.NewWriter(file)
-    defer writer.Flush()
+	writer := csv.NewWriter(file)
+	defer writer.Flush()
 
-    // Write CSV header
-    writer.Write([]string{
-        "Timestamp",
-        "Duration (ms)",
-        "Status Code",
-        "Request Type",
-    })
+	// Write CSV header
+	writer.Write([]string{
+		"Timestamp",
+		"Duration (ms)",
+		"Status Code",
+		"Request Type",
+	})
 
-    // Write individual request metrics
-    for _, metric := range metrics.RequestLatencies {
-        writer.Write([]string{
-            metric.Timestamp.Format(time.RFC3339),
-            fmt.Sprintf("%.2f", float64(metric.Duration.Milliseconds())),
-            fmt.Sprintf("%d", metric.StatusCode),
-            metric.Verb,
-        })
-    }
+	// Write individual request metrics
+	for _, metric := range metrics.RequestLatencies {
+		writer.Write([]string{
+			metric.Timestamp.Format(time.RFC3339),
+			fmt.Sprintf("%.2f", float64(metric.Duration.Milliseconds())),
+			fmt.Sprintf("%d", metric.StatusCode),
+			metric.Verb,
+		})
+	}
 
-    fmt.Println("Detailed metrics saved:", csvFile)
+	fmt.Println("Detailed metrics saved:", csvFile)
metrics saved:", csvFile) } // saveReport writes the summary report to a text file // Parameters: // - results: formatted string containing the complete test results func saveReport(results string) { - resultsDir := ".reports" - os.MkdirAll(resultsDir, os.ModePerm) + resultsDir := ".reports" + os.MkdirAll(resultsDir, os.ModePerm) - resultsFile := filepath.Join(resultsDir, fmt.Sprintf("summary_%d.txt", time.Now().Unix())) + resultsFile := filepath.Join(resultsDir, fmt.Sprintf("summary_%d.txt", time.Now().Unix())) - if err := os.WriteFile(resultsFile, []byte(results), 0644); err != nil { - fmt.Println("Error saving report:", err) - return - } + if err := os.WriteFile(resultsFile, []byte(results), 0644); err != nil { + fmt.Println("Error saving report:", err) + return + } - fmt.Println("Summary report saved:", resultsFile) + fmt.Println("Summary report saved:", resultsFile) }