1
0
Fork 0

Added documentation + detailed logging

This commit is contained in:
Atridad Lahiji 2024-12-04 17:45:27 -06:00
parent 5dad2d5275
commit 078791ee64
Signed by: atridad
SSH key fingerprint: SHA256:LGomp8Opq0jz+7kbwNcdfTcuaLRb5Nh0k5AchDDb438
3 changed files with 241 additions and 124 deletions

View file

@ -1,6 +1,15 @@
# Loadr
A lightweight REST load testing tool with robust support for different request patterns, token auth, and performance reports.
A lightweight REST load testing tool with flexible request patterns, token auth, and detailed performance metrics output.
## Stack
- Core: Pure Golang
- Output: Console + CSV/TXT reporting
- Pattern support: Sequential and probabilistic request patterns
- Transport: Standard Go HTTP client
## Requirements
- Golang 1.22.0+
## Installation
@ -11,58 +20,85 @@ go build
## Quick Start
```bash
# Simple pattern: 5 POST requests
# Simple pattern: 5 POST requests at 20 req/sec
./loadr -rate=20 -max=100 -url=http://api.example.com -pattern=5p
# Mixed pattern: 1 POST followed by 5 GETs, repeating
./loadr -rate=20 -max=100 -url=http://api.example.com -pattern=1p5g
# With authentication and request body
# Probabilistic pattern: 20% POSTs, 80% GETs
./loadr -rate=20 -max=100 -url=http://api.example.com -pattern=20%p80%g
# With auth and request body
./loadr -rate=20 -max=100 -url=http://api.example.com -pattern=2p3g -json=./data.json -token=YourBearerToken
```
## Request Patterns
The `-pattern` flag supports flexible request patterns:
The `-pattern` flag supports three types of request patterns:
### Simple Patterns
- `5p` : 5 POST requests
- `3g` : 3 GET requests
- Default is `1g` if no pattern specified
### Sequential Patterns
- `1p5g` : 1 POST followed by 5 GETs
- `1p5g` : 1 POST followed by 5 GETs, repeating
- `2p3g` : 2 POSTs followed by 3 GETs
- `3g2p` : 3 GETs followed by 2 POSTs
### Probabilistic Patterns
- `20%p80%g` : 20% POSTs and 80% GETs
### Pattern Rules
- Numbers specify how many requests of each type
- 'p' or 'P' specifies POST requests
- 'g' or 'G' specifies GET requests
- '%' indicates probabilistic requests
- If no number is specified, 1 is assumed (e.g., "pg" = "1p1g")
- Pattern repeats until max requests is reached
### Probabilistic Patterns
- `20%p80%g` : Random selection with 20% POST and 80% GET probability
- Percentages must sum to 100
## Command Line Flags
- `-rate`: Number of requests per second (default: 10)
- `-max`: Maximum number of requests to send (default: 50)
- `-url`: Target URL (default: "https://example.com")
- `-pattern`: Request pattern (e.g., "5p", "1p5g", "3g2p", "10%p90%g")
- `-json`: Path to JSON file for request body
- `-token`: Bearer token for authorization
- `-v`, `-version`: Print version information
```bash
-rate Number of requests per second (default: 10)
-max Maximum number of requests to send (default: 50)
-url Target URL (default: "https://example.com")
-pattern Request pattern (e.g., "5p", "1p5g", "20%p80%g")
-json Path to JSON file for request body
-token Bearer token for authorization
-v Print version information
```
## Reports
## Performance Metrics
Test results are automatically:
1. Displayed in the console
2. Saved to `.reports/[timestamp].txt`
Loadr generates two types of reports:
Reports include:
- Total requests sent and received
### Summary Report (.reports/summary_[timestamp].txt)
- Total requests sent/received
- Average, maximum, and minimum latency
- Requests per second (sent and received)
- Requests/sec (target and actual)
- Latency percentiles (p50, p95, p99)
- Pattern information
### Detailed CSV (.reports/detailed_metrics_[timestamp].csv)
- Per-request timestamps
- Individual request latencies
- Status codes
- Request types
## Technical Details
### Rate Limiting
Loadr uses a time-based approach to approximate the target requests per second:
1. Calculates ideal interval between requests: `interval = 1second / requestsPerSecond`
2. Tracks next scheduled request time
3. Uses sleep to maintain timing between requests
4. Launches requests asynchronously to maintain timing accuracy
### Request Execution
- Uses a global HTTP client for connection reuse
- Requests are executed concurrently using goroutines
- Metrics are updated synchronously using mutex protection
### Metrics Collection
- Real-time tracking of request latencies
- Thread-safe counters for requests and responses
- Calculates percentiles from stored request durations
- Support for detailed CSV export for external analysis
Note: The actual request rate may vary slightly from the target rate due to system load and network conditions. The detailed CSV output can be used to analyze the actual timing distribution.

View file

@ -1,146 +1,218 @@
package lib
import (
"encoding/csv"
"fmt"
"math"
"net/http"
"os"
"path/filepath"
"sort"
"strings"
"time"
)
// Global metrics instance to track performance during load testing.
var metrics = PerformanceMetrics{
	// Start MinLatency at the maximum duration so the first observed
	// request latency always becomes the new minimum.
	MinLatency:       time.Duration(math.MaxInt64),
	ResponseCounters: make(map[int]int32),
	RequestLatencies: make([]RequestMetric, 0),
}
// UpdateMetrics records metrics for a single completed request.
// It is safe for concurrent use: all shared state is guarded by metrics.Mu.
// Parameters:
//   - duration: time taken to complete the request
//   - resp: HTTP response from the request
//   - second: elapsed seconds since the start of the test (bucket key for
//     per-second response counting)
func UpdateMetrics(duration time.Duration, resp *http.Response, second int) {
	metrics.Mu.Lock()
	defer metrics.Mu.Unlock()

	// Store the individual request metric for percentile and CSV reporting.
	metrics.RequestLatencies = append(metrics.RequestLatencies, RequestMetric{
		Timestamp:  time.Now(),
		Duration:   duration,
		StatusCode: resp.StatusCode,
		Verb:       resp.Request.Method,
	})

	// Update aggregate metrics.
	metrics.TotalRequests++
	metrics.TotalLatency += duration

	// Only 200 OK responses count as successful.
	if resp.StatusCode == http.StatusOK {
		metrics.TotalResponses++
		metrics.ResponseCounters[second]++
	}

	// Update max/min latencies.
	if duration > metrics.MaxLatency {
		metrics.MaxLatency = duration
	}
	if duration < metrics.MinLatency {
		metrics.MinLatency = duration
	}

	// Debug log of current metrics.
	fmt.Printf("Current metrics - Total Requests: %d, Total Responses: %d\n",
		metrics.TotalRequests, metrics.TotalResponses)
}
// CalculateAndPrintMetrics generates a comprehensive report of load test performance
// calculatePercentile calculates the nth percentile of latencies
// Parameters:
// - latencies: sorted slice of request durations
// - percentile: desired percentile (e.g., 50 for p50, 95 for p95)
// Returns: the duration at the specified percentile
func calculatePercentile(latencies []time.Duration, percentile float64) time.Duration {
if len(latencies) == 0 {
return 0
}
index := int(math.Ceil((percentile/100.0)*float64(len(latencies)))) - 1
if index < 0 {
index = 0
}
return latencies[index]
}
// CalculateAndPrintMetrics generates and saves comprehensive test results:
// a per-request CSV (via saveDetailedMetrics) and a summary report that is
// printed to the console and written to disk (via saveReport).
// Parameters:
//   - startTime: when the load test began
//   - requestsPerSecond: target request rate
//   - endpoint: URL being tested
//   - patterns: request patterns used in the test
func CalculateAndPrintMetrics(startTime time.Time, requestsPerSecond float64, endpoint string, patterns []RequestPattern) {
	// Small delay to ensure all in-flight metric updates are captured.
	time.Sleep(100 * time.Millisecond)

	metrics.Mu.Lock()
	defer metrics.Mu.Unlock()

	// Save detailed per-request metrics to CSV.
	saveDetailedMetrics()

	// Total test duration and total successful responses.
	totalDuration := time.Since(startTime).Seconds()
	totalResponses := int32(0)
	for _, count := range metrics.ResponseCounters {
		totalResponses += count
	}

	// Collect and sort latencies for percentile calculations.
	latencies := make([]time.Duration, len(metrics.RequestLatencies))
	for i, metric := range metrics.RequestLatencies {
		latencies[i] = metric.Duration
	}
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})

	// Ensure MinLatency is not left at its initial sentinel value when no
	// request ever updated it.
	if metrics.MinLatency == time.Duration(math.MaxInt64) {
		metrics.MinLatency = 0
	}

	// Latency percentiles.
	p50 := calculatePercentile(latencies, 50)
	p95 := calculatePercentile(latencies, 95)
	p99 := calculatePercentile(latencies, 99)

	// Average latency across all requests (0 if none were sent).
	averageLatency := time.Duration(0)
	if metrics.TotalRequests > 0 {
		averageLatency = metrics.TotalLatency / time.Duration(metrics.TotalRequests)
	}

	// Build detailed results string.
	results := fmt.Sprintf("Load Test Report\n=============\n\n")
	results += fmt.Sprintf("Endpoint: %s\n", endpoint)
	results += fmt.Sprintf("Pattern: ")

	// Format request patterns.
	for i, p := range patterns {
		if i > 0 {
			results += " → "
		}
		if p.Percentage > 0 && p.Percentage < 100 {
			// Probabilistic pattern (e.g., "20%p80%g")
			results += fmt.Sprintf("%.0f%%%s", p.Percentage, strings.ToLower(p.Verb[:1]))
		} else {
			// Simple or sequential pattern (e.g., "5p", "3g", "1p5g")
			results += fmt.Sprintf("%d%s", p.Sequence, strings.ToLower(p.Verb[:1]))
		}
	}
	results += "\n\n"

	// Add performance metrics.
	results += fmt.Sprintf("Performance Metrics\n")
	results += fmt.Sprintf("-----------------\n")
	results += fmt.Sprintf("Total Requests Sent: %d\n", metrics.TotalRequests)
	results += fmt.Sprintf("Total Responses Received: %d\n", totalResponses)
	results += fmt.Sprintf("Average Latency: %s\n", averageLatency)
	results += fmt.Sprintf("Max Latency: %s\n", metrics.MaxLatency)
	results += fmt.Sprintf("Min Latency: %s\n", metrics.MinLatency)
	results += fmt.Sprintf("Requests/sec (Target): %.2f\n", requestsPerSecond)
	results += fmt.Sprintf("Requests/sec (Actual): %.2f\n", float64(metrics.TotalRequests)/totalDuration)
	results += fmt.Sprintf("Responses/sec: %.2f\n", float64(totalResponses)/totalDuration)

	// Add percentile information.
	results += fmt.Sprintf("\nLatency Percentiles\n")
	results += fmt.Sprintf("-----------------\n")
	results += fmt.Sprintf("50th percentile (p50): %s\n", p50)
	results += fmt.Sprintf("95th percentile (p95): %s\n", p95)
	results += fmt.Sprintf("99th percentile (p99): %s\n", p99)

	// Print and save the report.
	fmt.Println(results)
	saveReport(results)
}
// saveDetailedMetrics writes per-request metrics to a timestamped CSV file
// in the .reports directory. Each row contains the request's timestamp,
// latency in milliseconds, HTTP status code, and HTTP method.
// Errors are reported to stdout rather than returned, matching saveReport.
func saveDetailedMetrics() {
	resultsDir := ".reports"
	if err := os.MkdirAll(resultsDir, os.ModePerm); err != nil {
		fmt.Println("Error creating reports directory:", err)
		return
	}

	csvFile := filepath.Join(resultsDir, fmt.Sprintf("detailed_metrics_%d.csv", time.Now().Unix()))
	file, err := os.Create(csvFile)
	if err != nil {
		fmt.Println("Error creating CSV file:", err)
		return
	}
	defer file.Close()

	writer := csv.NewWriter(file)

	// Write CSV header.
	writer.Write([]string{
		"Timestamp",
		"Duration (ms)",
		"Status Code",
		"Request Type",
	})

	// Write individual request metrics.
	for _, metric := range metrics.RequestLatencies {
		writer.Write([]string{
			metric.Timestamp.Format(time.RFC3339),
			fmt.Sprintf("%.2f", float64(metric.Duration.Milliseconds())),
			fmt.Sprintf("%d", metric.StatusCode),
			metric.Verb,
		})
	}

	// Flush buffered rows and surface any write error the csv.Writer
	// collected (Write errors are sticky and only visible via Error()).
	writer.Flush()
	if err := writer.Error(); err != nil {
		fmt.Println("Error writing CSV file:", err)
		return
	}

	fmt.Println("Detailed metrics saved:", csvFile)
}
// saveReport writes the summary report to a timestamped text file in the
// .reports directory.
// Parameters:
//   - results: formatted string containing the complete test results
func saveReport(results string) {
	// Ensure the .reports directory exists.
	resultsDir := ".reports"
	os.MkdirAll(resultsDir, os.ModePerm)

	// Unique filename based on the current Unix timestamp.
	resultsFile := filepath.Join(resultsDir, fmt.Sprintf("summary_%d.txt", time.Now().Unix()))

	if err := os.WriteFile(resultsFile, []byte(results), 0644); err != nil {
		fmt.Println("Error saving report:", err)
		return
	}
	fmt.Println("Summary report saved:", resultsFile)
}

View file

@ -15,6 +15,15 @@ type PerformanceMetrics struct {
MaxLatency time.Duration
MinLatency time.Duration
ResponseCounters map[int]int32
RequestLatencies []RequestMetric
}
// RequestMetric represents a single request's performance metrics,
// captured once per request and later used for percentile calculation
// and detailed CSV export.
type RequestMetric struct {
	Timestamp  time.Time     // when the metric was recorded (time.Now() at request completion)
	Duration   time.Duration // end-to-end latency of the request
	StatusCode int           // HTTP status code of the response
	Verb       string        // HTTP method of the request (e.g. "GET", "POST")
}
// RequestError represents a detailed error that occurs during an HTTP request