forked from aegis/pyserveX
- Added functionality to mark responses as cache hits to prevent incorrect X-Cache headers.
- Introduced setCacheHitFlag function to traverse response writer wrappers and set the cache hit flag.
- Updated cachingResponseWriter to manage cache hit state and adjust the X-Cache header accordingly.
- Enhanced ProcessRequest and ProcessResponse methods to use the new caching logic.

feat(extension): Introduce ResponseWriterWrapper and ResponseFinalizer interfaces
- Added ResponseWriterWrapper interface for extensions to wrap response writers.
- Introduced ResponseFinalizer interface for finalizing responses after processing.

refactor(manager): Improve response writer wrapping and finalization
- Updated Manager.Handler to wrap response writers through all enabled extensions.
- Implemented finalization of response writers after processing requests.

test(caching): Add comprehensive integration tests for caching behavior
- Created caching_test.go with tests for cache hit/miss, TTL expiration, pattern-based caching, and more.
- Ensured that caching logic works correctly for various scenarios, including query strings and error responses.

test(routing): Add integration tests for routing behavior
- Created routing_test.go with tests for route priority, case sensitivity, default routes, and return directives.
- Verified that routing behaves as expected with multiple regex routes and named groups.
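The feat(extension) and refactor(manager) entries above reference a ResponseWriterWrapper/ResponseFinalizer interface pair and a setCacheHitFlag helper that walks wrapped response writers to flag cache hits. Those definitions are not part of caching_test.go below; as rough orientation, here is a minimal sketch of how such pieces could fit together. Only the names ResponseWriterWrapper, ResponseFinalizer, setCacheHitFlag, cachingResponseWriter, and the X-Cache header come from the commit message; every signature and helper type (WrapResponseWriter, FinalizeResponse, Unwrap, MarkCacheHit) is an assumption, not the project's actual API.

// Sketch only: the real definitions live in the extension/manager packages.
// Names are taken from the commit message; signatures are assumptions.
package extension

import "net/http"

// ResponseWriterWrapper lets an extension wrap the response writer so it can
// observe or buffer what downstream handlers write.
type ResponseWriterWrapper interface {
	WrapResponseWriter(w http.ResponseWriter, r *http.Request) http.ResponseWriter
}

// ResponseFinalizer is called after the request has been processed, giving the
// extension a chance to flush buffers or adjust trailing state.
type ResponseFinalizer interface {
	FinalizeResponse(w http.ResponseWriter, r *http.Request) error
}

// unwrapper is the single method a wrapping writer would need to expose so the
// wrapper chain can be traversed (assumed, not taken from the source).
type unwrapper interface {
	Unwrap() http.ResponseWriter
}

// cacheHitMarker marks a writer as serving a cached response so it emits
// "X-Cache: HIT" instead of "MISS" (assumed shape of cachingResponseWriter).
type cacheHitMarker interface {
	MarkCacheHit()
}

// setCacheHitFlag walks the wrapper chain until it finds a writer that can
// record a cache hit, mirroring the traversal described in the commit message.
func setCacheHitFlag(w http.ResponseWriter) {
	for w != nil {
		if m, ok := w.(cacheHitMarker); ok {
			m.MarkCacheHit()
			return
		}
		u, ok := w.(unwrapper)
		if !ok {
			return
		}
		w = u.Unwrap()
	}
}

Under this sketch, Manager.Handler would pass the response writer through each enabled extension that implements ResponseWriterWrapper before invoking the handler, then call the finalizers once processing completes, which would let the caching extension rewrite X-Cache to HIT before the response is finalized.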
667 lines · 17 KiB · Go
package integration

import (
	"encoding/json"
	"fmt"
	"net/http"
	"sync/atomic"
	"testing"
	"time"

	"github.com/konduktor/konduktor/internal/extension"
)

// ============== Basic Cache Hit/Miss Tests ==============

func TestCaching_BasicHitMiss(t *testing.T) {
	var requestCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		count := atomic.AddInt64(&requestCount, 1)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"request_number": count,
			"timestamp":      time.Now().UnixNano(),
		})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	// Create caching extension
	cachingExt, err := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "30s",
				"methods": []interface{}{"GET"},
			},
		},
	}, logger)
	if err != nil {
		t.Fatalf("Failed to create caching extension: %v", err)
	}

	// Create routing extension
	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// First request - should be MISS
	resp1, err := client.Get("/api/data", nil)
	if err != nil {
		t.Fatalf("Request 1 failed: %v", err)
	}

	cacheHeader1 := resp1.Header.Get("X-Cache")
	var result1 map[string]interface{}
	json.NewDecoder(resp1.Body).Decode(&result1)
	resp1.Body.Close()

	if cacheHeader1 != "MISS" {
		t.Errorf("Expected X-Cache: MISS for first request, got %q", cacheHeader1)
	}

	// Second request - should be HIT (same response)
	resp2, err := client.Get("/api/data", nil)
	if err != nil {
		t.Fatalf("Request 2 failed: %v", err)
	}

	cacheHeader2 := resp2.Header.Get("X-Cache")
	var result2 map[string]interface{}
	json.NewDecoder(resp2.Body).Decode(&result2)
	resp2.Body.Close()

	if cacheHeader2 != "HIT" {
		t.Errorf("Expected X-Cache: HIT for second request, got %q", cacheHeader2)
	}

	// Verify same response (from cache)
	if result1["request_number"] != result2["request_number"] {
		t.Errorf("Expected same request_number from cache, got %v and %v",
			result1["request_number"], result2["request_number"])
	}

	// Backend should only receive 1 request
	if atomic.LoadInt64(&requestCount) != 1 {
		t.Errorf("Expected 1 backend request, got %d", requestCount)
	}
}

// ============== TTL Expiration Tests ==============

func TestCaching_TTLExpiration(t *testing.T) {
	var requestCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		count := atomic.AddInt64(&requestCount, 1)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"request_number": count,
		})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	// Create caching extension with short TTL
	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "100ms", // Very short TTL for testing
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "100ms",
				"methods": []interface{}{"GET"},
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// First request
	resp1, _ := client.Get("/api/data", nil)
	var result1 map[string]interface{}
	json.NewDecoder(resp1.Body).Decode(&result1)
	resp1.Body.Close()

	// Second request (within TTL) - should be HIT
	resp2, _ := client.Get("/api/data", nil)
	cacheHeader2 := resp2.Header.Get("X-Cache")
	resp2.Body.Close()

	if cacheHeader2 != "HIT" {
		t.Errorf("Expected X-Cache: HIT before TTL expires, got %q", cacheHeader2)
	}

	// Wait for TTL to expire
	time.Sleep(150 * time.Millisecond)

	// Third request (after TTL) - should be MISS
	resp3, _ := client.Get("/api/data", nil)
	cacheHeader3 := resp3.Header.Get("X-Cache")
	var result3 map[string]interface{}
	json.NewDecoder(resp3.Body).Decode(&result3)
	resp3.Body.Close()

	if cacheHeader3 != "MISS" {
		t.Errorf("Expected X-Cache: MISS after TTL expires, got %q", cacheHeader3)
	}

	// Verify new request was made (different request_number)
	if result1["request_number"] == result3["request_number"] {
		t.Error("Expected different request_number after TTL expiration")
	}
}

// ============== Pattern-Based Caching Tests ==============

func TestCaching_PatternBasedCaching(t *testing.T) {
	var apiCount, staticCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		if len(r.URL.Path) >= 5 && r.URL.Path[:5] == "/api/" {
			atomic.AddInt64(&apiCount, 1)
		} else {
			atomic.AddInt64(&staticCount, 1)
		}
		json.NewEncoder(w).Encode(map[string]string{"path": r.URL.Path})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	// Only cache /api/* paths
	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "1m",
				"methods": []interface{}{"GET"},
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// Multiple requests to /api/ - should be cached
	for i := 0; i < 3; i++ {
		resp, _ := client.Get("/api/users", nil)
		resp.Body.Close()
	}

	// Multiple requests to /static/ - should NOT be cached (not matching pattern)
	for i := 0; i < 3; i++ {
		resp, _ := client.Get("/static/file.js", nil)
		resp.Body.Close()
	}

	// API should have only 1 request (cached)
	if atomic.LoadInt64(&apiCount) != 1 {
		t.Errorf("Expected 1 API request (cached), got %d", apiCount)
	}

	// Static should have 3 requests (not cached)
	if atomic.LoadInt64(&staticCount) != 3 {
		t.Errorf("Expected 3 static requests (not cached), got %d", staticCount)
	}
}

// ============== Method-Specific Caching Tests ==============

func TestCaching_OnlyGETMethodCached(t *testing.T) {
	var getCount, postCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		if r.Method == "GET" {
			atomic.AddInt64(&getCount, 1)
		} else if r.Method == "POST" {
			atomic.AddInt64(&postCount, 1)
		}
		json.NewEncoder(w).Encode(map[string]string{
			"method": r.Method,
		})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "1m",
				"methods": []interface{}{"GET"}, // Only GET
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// Multiple GET requests - should be cached
	for i := 0; i < 3; i++ {
		resp, _ := client.Get("/api/data", nil)
		resp.Body.Close()
	}

	// Multiple POST requests - should NOT be cached
	for i := 0; i < 3; i++ {
		resp, _ := client.Post("/api/data", []byte(`{}`), map[string]string{
			"Content-Type": "application/json",
		})
		resp.Body.Close()
	}

	if atomic.LoadInt64(&getCount) != 1 {
		t.Errorf("Expected 1 GET request (cached), got %d", getCount)
	}

	if atomic.LoadInt64(&postCount) != 3 {
		t.Errorf("Expected 3 POST requests (not cached), got %d", postCount)
	}
}

// ============== Different Paths Different Cache Keys ==============

func TestCaching_DifferentPathsDifferentCacheKeys(t *testing.T) {
	var requestCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		count := atomic.AddInt64(&requestCount, 1)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"path":           r.URL.Path,
			"request_number": count,
		})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "1m",
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// Request different paths
	paths := []string{"/api/users", "/api/posts", "/api/comments"}

	for _, path := range paths {
		resp, _ := client.Get(path, nil)
		resp.Body.Close()
	}

	// Each path should result in a separate backend request
	if atomic.LoadInt64(&requestCount) != 3 {
		t.Errorf("Expected 3 backend requests (one per path), got %d", requestCount)
	}

	// Request same paths again - all should be cached
	for _, path := range paths {
		resp, _ := client.Get(path, nil)
		cacheHeader := resp.Header.Get("X-Cache")
		resp.Body.Close()

		if cacheHeader != "HIT" {
			t.Errorf("Expected X-Cache: HIT for %s, got %q", path, cacheHeader)
		}
	}

	// No additional backend requests
	if atomic.LoadInt64(&requestCount) != 3 {
		t.Errorf("Expected still 3 backend requests after cache hits, got %d", requestCount)
	}
}

// ============== Query String Affects Cache Key ==============

func TestCaching_QueryStringAffectsCacheKey(t *testing.T) {
	var requestCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		count := atomic.AddInt64(&requestCount, 1)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"query":          r.URL.RawQuery,
			"request_number": count,
		})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "1m",
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// Different query strings = different cache keys
	queries := []string{
		"/api/search?q=hello",
		"/api/search?q=world",
		"/api/search?q=test",
	}

	for _, query := range queries {
		resp, _ := client.Get(query, nil)
		resp.Body.Close()
	}

	// Each unique query should result in a separate backend request
	if atomic.LoadInt64(&requestCount) != 3 {
		t.Errorf("Expected 3 backend requests (one per query), got %d", requestCount)
	}

	// Same query again should be cached
	resp, _ := client.Get("/api/search?q=hello", nil)
	cacheHeader := resp.Header.Get("X-Cache")
	resp.Body.Close()

	if cacheHeader != "HIT" {
		t.Errorf("Expected X-Cache: HIT for repeated query, got %q", cacheHeader)
	}
}

// ============== Cache Does Not Store Error Responses ==============

func TestCaching_DoesNotCacheErrors(t *testing.T) {
	var requestCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		atomic.AddInt64(&requestCount, 1)
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusInternalServerError)
		json.NewEncoder(w).Encode(map[string]string{"error": "internal error"})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "1m",
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// Multiple requests to error endpoint
	for i := 0; i < 3; i++ {
		resp, _ := client.Get("/api/error", nil)
		resp.Body.Close()
	}

	// All requests should reach backend (errors not cached)
	if atomic.LoadInt64(&requestCount) != 3 {
		t.Errorf("Expected 3 backend requests (errors not cached), got %d", requestCount)
	}
}

// ============== Concurrent Cache Access ==============

func TestCaching_ConcurrentAccess(t *testing.T) {
	var requestCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		// Small delay to increase chance of race conditions
		time.Sleep(10 * time.Millisecond)
		count := atomic.AddInt64(&requestCount, 1)
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]interface{}{
			"request_number": count,
		})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "1m",
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	const numRequests = 20
	results := make(chan error, numRequests)

	// Make first request to populate cache
	client := NewHTTPClient(server.URL)
	resp, _ := client.Get("/api/concurrent", nil)
	resp.Body.Close()

	// Now many concurrent requests should all hit cache
	for i := 0; i < numRequests; i++ {
		go func(n int) {
			client := NewHTTPClient(server.URL)
			resp, err := client.Get("/api/concurrent", nil)
			if err != nil {
				results <- err
				return
			}

			cacheHeader := resp.Header.Get("X-Cache")
			resp.Body.Close()

			if cacheHeader != "HIT" {
				results <- fmt.Errorf("request %d: expected HIT, got %s", n, cacheHeader)
				return
			}
			results <- nil
		}(i)
	}

	// Collect results
	var errors []error
	for i := 0; i < numRequests; i++ {
		if err := <-results; err != nil {
			errors = append(errors, err)
		}
	}

	if len(errors) > 0 {
		t.Errorf("Got %d errors in concurrent cache access: %v", len(errors), errors[:min(5, len(errors))])
	}

	// Only 1 request should reach backend (the initial one)
	if atomic.LoadInt64(&requestCount) != 1 {
		t.Errorf("Expected 1 backend request, got %d", requestCount)
	}
}

// ============== Multiple Cache Patterns ==============

func TestCaching_MultipleCachePatterns(t *testing.T) {
	var apiCount, staticCount int64

	backend := StartBackend(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		if len(r.URL.Path) >= 5 && r.URL.Path[:5] == "/api/" {
			atomic.AddInt64(&apiCount, 1)
		} else if len(r.URL.Path) >= 8 && r.URL.Path[:8] == "/static/" {
			atomic.AddInt64(&staticCount, 1)
		}
		json.NewEncoder(w).Encode(map[string]string{"path": r.URL.Path})
	})
	defer backend.Close()

	logger := createTestLogger(t)

	cachingExt, _ := extension.NewCachingExtension(map[string]interface{}{
		"default_ttl": "1m",
		"cache_patterns": []interface{}{
			map[string]interface{}{
				"pattern": "^/api/.*",
				"ttl":     "30s",
				"methods": []interface{}{"GET"},
			},
			map[string]interface{}{
				"pattern": "^/static/.*",
				"ttl":     "1h", // Static files cached longer
				"methods": []interface{}{"GET"},
			},
		},
	}, logger)

	routingExt, _ := extension.NewRoutingExtension(map[string]interface{}{
		"regex_locations": map[string]interface{}{
			"__default__": map[string]interface{}{
				"proxy_pass": backend.URL(),
			},
		},
	}, logger)

	server := StartTestServer(t, &ServerConfig{
		Extensions: []extension.Extension{cachingExt, routingExt},
	})
	defer server.Close()

	client := NewHTTPClient(server.URL)

	// Multiple requests to both patterns
	for i := 0; i < 3; i++ {
		resp1, _ := client.Get("/api/data", nil)
		resp1.Body.Close()

		resp2, _ := client.Get("/static/app.js", nil)
		resp2.Body.Close()
	}

	// Both should be cached (1 request each)
	if atomic.LoadInt64(&apiCount) != 1 {
		t.Errorf("Expected 1 API request, got %d", apiCount)
	}

	if atomic.LoadInt64(&staticCount) != 1 {
		t.Errorf("Expected 1 static request, got %d", staticCount)
	}
}