A high-performance, reliable network connection pool management system for Go applications.
- Features
- Installation
- Quick Start
- Usage
- Security Features
- Connection Keep-Alive
- Dynamic Adjustment
- Connection Cleanup
- Advanced Usage
- Performance Considerations
- Troubleshooting
- Best Practices
- License
- Thread-safe connection management with mutex protection
- Support for both client and server connection pools
- Dynamic capacity adjustment based on usage patterns
- Automatic connection health monitoring
- Connection keep-alive management for maintaining active connections
- Multiple TLS security modes (none, self-signed, verified)
- Connection identification and tracking
- Graceful error handling and recovery
- Configurable connection creation intervals
- Auto-reconnection with exponential backoff
- Connection activity validation
- Safe cleanup of idle connections
go get github.com/NodePassProject/pool
Here's a minimal example to get you started:
package main
import (
"net"
"time"
"github.com/NodePassProject/pool"
)
func main() {
// Create a client pool
dialer := func() (net.Conn, error) {
return net.Dial("tcp", "example.com:8080")
}
pool := pool.NewClientPool(
5, 20, // min/max capacity
500*time.Millisecond, 5*time.Second, // min/max intervals
30*time.Second, // keep-alive period
"0", // TLS mode
"example.com", // hostname
dialer,
)
// Start the pool manager
go pool.ClientManager()
// Use the pool
conn, err := pool.ClientGet("connection-id")
if err != nil {
// Handle error...
return
}
if conn != nil {
// Use connection...
defer conn.Close()
}
// Clean up
defer pool.Close()
}
package main
import (
"net"
"time"
"github.com/NodePassProject/pool"
)
func main() { // Create a dialer function
dialer := func() (net.Conn, error) {
return net.Dial("tcp", "example.com:8080")
}
// Create a new client pool with:
// - Minimum capacity: 5 connections
// - Maximum capacity: 20 connections
// - Minimum interval: 500ms between connection attempts
// - Maximum interval: 5s between connection attempts
// - Keep-alive period: 30s for connection health monitoring
// - TLS mode: "2" (verified certificates)
// - Hostname for certificate verification: "example.com"
clientPool := pool.NewClientPool(
5, 20,
500*time.Millisecond, 5*time.Second,
30*time.Second,
"2",
"example.com",
dialer,
)
// Start the client manager (usually in a goroutine)
go clientPool.ClientManager()
// Get a connection by ID (usually received from the server)
conn, err := clientPool.ClientGet("connection-id")
if err != nil {
// Handle error...
return
}
// Use the connection...
// When finished with the pool
clientPool.Close()
}
Note: ClientGet now returns (net.Conn, error). The error indicates that the connection with the specified ID was not found in the pool.
package main
import (
"crypto/tls"
"net"
"time"
"github.com/NodePassProject/pool"
)
func main() {
// Create a listener
listener, err := net.Listen("tcp", ":8080")
if err != nil {
panic(err)
}
// Optional: Create a TLS config
tlsConfig := &tls.Config{
// Configure TLS settings
MinVersion: tls.VersionTLS13,
}
// Create a new server pool
// - Maximum capacity: 20 connections
// - Restrict to specific client IP (optional, "" for any IP, "192.168.1.10" to only allow that specific IP)
// - Use TLS config (optional, nil for no TLS)
// - Use the created listener
// - Keep-alive period: 30s for connection health monitoring
serverPool := pool.NewServerPool(20, "192.168.1.10", tlsConfig, listener, 30*time.Second)
// Start the server manager (usually in a goroutine)
go serverPool.ServerManager()
// Get a new connection from the pool (blocks until available)
id, conn, err := serverPool.ServerGet(30 * time.Second)
if err != nil {
// Handle error (timeout or pool closed)
return
}
// Use the connection...
// When finished with the pool
serverPool.Close()
}
Note: ServerGet now returns (string, net.Conn, error). The error can indicate:
- Timeout: when no connection becomes available within the specified timeout
- Context cancellation: when the pool is being closed
- Other pool-related errors
When you finish using a connection, you can return it to the pool using the Put
method. This helps avoid connection leaks and maximizes reuse:
// After using the connection
pool.Put(id, conn)
id is the connection ID generated by the server.
conn is the net.Conn object you want to return.
If the pool is full or the connection is already present, Put will close the connection automatically.
Best Practice: Always call Put (or Close if not reusing) after you are done with a connection to prevent resource leaks.
// Get a connection from client pool by ID
conn, err := clientPool.ClientGet("connection-id")
if err != nil {
// Connection with the specified ID not found
log.Printf("Connection not found: %v", err)
}
// Get a connection from server pool with timeout
id, conn, err := serverPool.ServerGet(30 * time.Second)
if err != nil {
// Handle various error cases:
// - Timeout: no connection available within the specified time
// - Context cancellation: pool is being closed
// - Other pool errors
log.Printf("Failed to get connection: %v", err)
}
// Check if the pool is ready
if clientPool.Ready() {
// The pool is initialized and ready for use
}
// Get current active connection count
activeConnections := clientPool.Active()
// Get current capacity setting
capacity := clientPool.Capacity()
// Get current connection creation interval
interval := clientPool.Interval()
// Clean idle connections from the pool
// Only removes connections that are available (not in use)
clientPool.Clean()
// Manually flush all connections (rarely needed)
clientPool.Flush()
// Record an error (increases internal error counter)
clientPool.AddError()
// Get the current error count
errorCount := clientPool.ErrorCount()
// Reset the error count to zero
clientPool.ResetError()
The NewServerPool
function allows you to restrict incoming connections to a specific client IP address. The function signature is:
func NewServerPool(
maxCap int,
clientIP string,
tlsConfig *tls.Config,
listener net.Listener,
keepAlive time.Duration,
) *Pool
maxCap: Maximum pool capacity.
clientIP: Restrict allowed client IP ("" for any).
tlsConfig: TLS configuration (can be nil).
listener: TCP listener.
keepAlive: Keep-alive period.
When the clientIP
parameter is set:
- All connections from other IP addresses will be immediately closed.
- This provides an additional layer of security beyond network firewalls.
- Particularly useful for internal services or dedicated client-server applications.
To allow connections from any IP address, use an empty string:
// Create a server pool that accepts connections from any IP
serverPool := pool.NewServerPool(20, "", tlsConfig, listener, 30*time.Second)
Mode | Description | Security Level | Use Case |
---|---|---|---|
"0" |
No TLS (plain TCP) | None | Internal networks, maximum performance |
"1" |
Self-signed certificates | Medium | Development, testing environments |
"2" |
Verified certificates | High | Production, public networks |
// No TLS - maximum performance
clientPool := pool.NewClientPool(5, 20, minIvl, maxIvl, keepAlive, "0", "example.com", dialer)
// Self-signed TLS - development/testing
clientPool := pool.NewClientPool(5, 20, minIvl, maxIvl, keepAlive, "1", "example.com", dialer)
// Verified TLS - production
clientPool := pool.NewClientPool(5, 20, minIvl, maxIvl, keepAlive, "2", "example.com", dialer)
Implementation Details (from pool.go):
-
Connection ID Generation:
- The server generates an 8-byte ID and sends it to the client after TLS handshake.
- Connection IDs are used for tracking and managing individual connections.
-
ClientGet Method:
- Returns (net.Conn, error) where error indicates if the connection ID was not found.
- Thread-safe with mutex protection.
-
ServerGet Method:
- Returns (string, net.Conn, error) where error can indicate timeout, context cancellation, or other pool errors.
- Blocks until a connection is available or timeout is reached.
- Validates connection health before returning.
-
Put Method:
- Prevents duplicate connections in the pool.
- If the pool is full or the connection is already present, the connection is closed automatically.
-
Clean Method:
- Safely removes only idle connections from the pool.
- Thread-safe with mutex protection.
- Does not affect connections currently in use.
- Useful for regular maintenance and reducing pool size.
-
Flush/Close:
- Flush closes all connections and resets the pool.
- Close cancels the context and flushes the pool.
-
Dynamic Adjustment:
- adjustInterval and adjustCapacity are used internally for pool optimization based on usage and success rate.
-
isActive:
- Checks if a connection is alive by sending an empty write with a short deadline.
-
Error Handling:
- AddError and ErrorCount are thread-safe and use mutex protection.
The pool implements TCP keep-alive functionality to maintain connection health and detect broken connections:
- Automatic Keep-Alive: All connections automatically enable TCP keep-alive
- Configurable Period: Set custom keep-alive periods for both client and server pools
- Connection Health: Helps detect and remove dead connections from the pool
- Network Efficiency: Reduces unnecessary connection overhead
// Client pool with 30-second keep-alive
clientPool := pool.NewClientPool(
5, 20,
500*time.Millisecond, 5*time.Second,
30*time.Second, // Keep-alive period
"2", // TLS mode
"example.com", // hostname
dialer,
)
// Server pool with 60-second keep-alive
serverPool := pool.NewServerPool(
20, // Maximum capacity
"192.168.1.10",
tlsConfig,
listener,
60*time.Second, // Keep-alive period
)
Period Range | Use Case | Pros | Cons |
---|---|---|---|
15-30s | High-frequency apps, real-time systems | Quick dead connection detection | Higher network overhead |
30-60s | General purpose applications | Balanced performance/overhead | Standard detection time |
60-120s | Low-frequency, batch processing | Minimal network overhead | Slower dead connection detection |
Recommendations:
- Web applications: 30-60 seconds
- Real-time systems: 15-30 seconds
- Batch processing: 60-120 seconds
- Behind NAT/Firewall: Use shorter periods (15-30s)
The pool automatically adjusts:
- Connection creation intervals based on idle connection count (using the adjustInterval method):
  - Decreases interval when pool is under-utilized (< 20% idle connections)
  - Increases interval when pool is over-utilized (> 80% idle connections)
- Connection capacity based on connection creation success rate (using the adjustCapacity method):
  - Decreases capacity when success rate is low (< 20%)
  - Increases capacity when success rate is high (> 80%)
These adjustments ensure optimal resource usage:
// Check current capacity and interval settings
currentCapacity := clientPool.Capacity()
currentInterval := clientPool.Interval()
The Clean()
method provides a safe way to remove idle connections from the pool without affecting connections currently in use.
// Clean all idle connections from the pool
clientPool.Clean()
Key Features:
- Safe for concurrent use: Only removes connections that are available in the idle pool
- No interruption: Connections currently in use (checked out via Get) are not affected
- Thread-safe: Uses mutex protection to prevent race conditions
- Non-blocking: Returns immediately after cleaning all idle connections
Scenario | Reason | Frequency |
---|---|---|
Connection pool maintenance | Remove stale or inactive connections | Periodic (e.g., every 5-10 minutes) |
After error spikes | Clear potentially problematic connections | After resolving issues |
Before scaling down | Reduce pool size gracefully | On-demand |
Memory pressure | Free up resources quickly | When memory is constrained |
Periodic Cleanup:
// Run periodic cleanup in a background goroutine
go func() {
ticker := time.NewTicker(5 * time.Minute)
defer ticker.Stop()
for range ticker.C {
clientPool.Clean()
log.Printf("Pool cleaned, active connections: %d", clientPool.Active())
}
}()
Cleanup After High Error Rate:
// Monitor and clean after errors
if clientPool.ErrorCount() > 50 {
log.Println("High error count detected, cleaning pool")
clientPool.Clean()
clientPool.ResetError()
}
On-Demand Cleanup:
// Manual cleanup when needed
func handleMaintenanceRequest(w http.ResponseWriter, r *http.Request) {
clientPool.Clean()
fmt.Fprintf(w, "Pool cleaned successfully")
}
Method | Behavior | Use Case |
---|---|---|
Clean() |
Removes only idle connections | Regular maintenance, safe cleanup |
Flush() |
Removes ALL connections | Emergency reset, shutdown preparation |
// Clean: Safe for ongoing operations
clientPool.Clean() // Removes idle connections, active ones continue working
// Flush: Disruptive, closes everything
clientPool.Flush() // Closes ALL connections, including active ones
Best Practice: Use Clean() for regular maintenance, and Flush() only when you need to completely reset the pool or before shutdown.
package main
import (
"log"
"net"
"time"
"github.com/NodePassProject/pool"
"github.com/NodePassProject/logs"
)
func main() {
logger := logs.NewLogger(logs.Info, true)
clientPool := pool.NewClientPool(
5, 20,
500*time.Millisecond, 5*time.Second,
30*time.Second,
"2",
"example.com",
func() (net.Conn, error) {
conn, err := net.Dial("tcp", "example.com:8080")
if err != nil {
// Log the error
logger.Error("Connection failed: %v", err)
// Record the error in the pool
clientPool.AddError()
}
return conn, err
},
)
go clientPool.ClientManager()
// Your application logic...
}
package main
import (
"context"
"net"
"time"
"github.com/NodePassProject/pool"
)
func main() {
// Create a context that can be cancelled
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clientPool := pool.NewClientPool(
5, 20,
500*time.Millisecond, 5*time.Second,
30*time.Second,
"2",
"example.com",
func() (net.Conn, error) {
// Use context-aware dialer
dialer := net.Dialer{Timeout: 5 * time.Second}
return dialer.DialContext(ctx, "tcp", "example.com:8080")
},
)
go clientPool.ClientManager()
// When needed to stop the pool:
// cancel()
// clientPool.Close()
}
package main
import (
"net"
"sync/atomic"
"time"
"github.com/NodePassProject/pool"
)
func main() {
// Create pools for different servers
serverAddresses := []string{
"server1.example.com:8080",
"server2.example.com:8080",
"server3.example.com:8080",
}
pools := make([]*pool.Pool, len(serverAddresses))
for i, addr := range serverAddresses {
serverAddr := addr // Create local copy for closure
pools[i] = pool.NewClientPool(
5, 20,
500*time.Millisecond, 5*time.Second,
30*time.Second,
"2",
serverAddr[:len(serverAddr)-5], // Extract hostname by stripping the 5-char ":8080" suffix (assumes all addresses end in ":8080")
func() (net.Conn, error) {
return net.Dial("tcp", serverAddr)
},
)
go pools[i].ClientManager()
}
// Simple round-robin load balancer
var counter int32 = 0
getNextPool := func() *pool.Pool {
next := atomic.AddInt32(&counter, 1) % int32(len(pools))
return pools[next]
}
// Usage
id, conn, err := getNextPool().ServerGet(30 * time.Second)
if err != nil {
// Handle error...
return
}
// Use connection...
// When done with all pools
for _, p := range pools {
p.Close()
}
}
Pool Size | Pros | Cons | Best For |
---|---|---|---|
Too Small (< 5) | Low resource usage | Connection contention, delays | Low-traffic applications |
Optimal (5-50) | Balanced performance | Requires monitoring | Most applications |
Too Large (> 100) | No contention | Resource waste, server overload | High-traffic, many clients |
Sizing Guidelines:
- Start with minCap = baseline_load and maxCap = peak_load × 1.5
- Monitor connection usage with pool.Active() and pool.Capacity()
- Adjust based on observed patterns
Aspect | No TLS | Self-signed TLS | Verified TLS |
---|---|---|---|
Handshake Time | ~1ms | ~10-50ms | ~50-100ms |
Memory Usage | Low | Medium | High |
CPU Overhead | Minimal | Medium | High |
Throughput | Maximum | ~80% of max | ~60% of max |
The isActive
method performs lightweight connection health checks:
- Cost: ~1ms per validation
- Frequency: On connection retrieval
- Trade-off: Reliability vs. slight performance overhead
For ultra-high-throughput systems, consider implementing custom validation strategies.
Symptoms: Connections fail to establish
Solutions:
- Check network connectivity to target host
- Verify server address and port are correct
- Increase connection timeout in dialer:
dialer := func() (net.Conn, error) {
d := net.Dialer{Timeout: 10 * time.Second}
return d.Dial("tcp", "example.com:8080")
}
Symptoms: TLS connections fail with certificate errors
Solutions:
- Verify certificate validity and expiration
- Check hostname matches certificate Common Name
- For testing, temporarily use TLS mode "1":
pool := pool.NewClientPool(5, 20, minIvl, maxIvl, keepAlive, "1", hostname, dialer)
Symptoms: ServerGet()
returns an error or times out
Solutions:
- Check network connectivity to target host
- Verify server address and port are correct
- Increase maximum capacity
- Reduce connection hold time in application code
- Check for connection leaks (ensure connections are properly closed)
- Monitor with pool.Active() and pool.ErrorCount()
- Use appropriate timeout values with ServerGet(timeout)
Symptoms: Frequent connection failures
Solutions:
- Implement exponential backoff in dialer
- Monitor server-side issues
- Track errors with pool.AddError() and pool.ErrorCount()
- Network connectivity: Can you ping/telnet to the target?
- Port availability: Is the target port open and listening?
- Certificate validity: For TLS, are certificates valid and not expired?
- Pool capacity: Is maxCap sufficient for your load?
- Connection leaks: Are you properly closing connections?
- Error monitoring: Are you tracking pool.ErrorCount()?
Add logging at key points for better debugging:
dialer := func() (net.Conn, error) {
log.Printf("Attempting connection to %s", address)
conn, err := net.Dial("tcp", address)
if err != nil {
log.Printf("Connection failed: %v", err)
pool.AddError() // Track the error
} else {
log.Printf("Connection established successfully")
}
return conn, err
}
// For most applications, start with these guidelines:
minCap := expectedConcurrentConnections
maxCap := int(float64(peakConcurrentConnections) * 1.5)
// Example for a web service handling 100 concurrent requests
clientPool := pool.NewClientPool(
100, 150, // min/max capacity based on load
500*time.Millisecond, 2*time.Second, // connection intervals
30*time.Second, // keep-alive
"2", // verified TLS for production
"api.example.com", // hostname
dialer,
)
// Aggressive (high-frequency applications)
minInterval := 100 * time.Millisecond
maxInterval := 1 * time.Second
// Balanced (general purpose)
minInterval := 500 * time.Millisecond
maxInterval := 5 * time.Second
// Conservative (low-frequency, batch processing)
minInterval := 2 * time.Second
maxInterval := 10 * time.Second
// GOOD: Always return connections
id, conn, err := serverPool.ServerGet(30 * time.Second)
if err != nil {
// Handle timeout or other errors
log.Printf("Failed to get connection: %v", err)
return err
}
if conn != nil {
defer func() {
if err := processData(conn); err != nil {
conn.Close() // Close on error
} else {
serverPool.Put(id, conn) // Return to pool on success
}
}()
// Use connection...
}
// BAD: Forgetting to return connections leads to pool exhaustion
id, conn, _ := serverPool.ServerGet(30 * time.Second)
// Missing Put() or Close() - causes connection leak!
// Use reasonable timeouts for ServerGet
timeout := 10 * time.Second
id, conn, err := serverPool.ServerGet(timeout)
if err != nil {
// Handle timeout or other errors
log.Printf("Failed to get connection within %v: %v", timeout, err)
return err
}
if conn == nil {
// This should not happen if err is nil, but keeping for safety
log.Printf("Unexpected: got nil connection without error")
return errors.New("unexpected nil connection")
}
type PoolManager struct {
pool *pool.Pool
cleanTicker *time.Ticker
logger *log.Logger
}
func (pm *PoolManager) startPeriodicCleanup() {
pm.cleanTicker = time.NewTicker(5 * time.Minute)
go func() {
for range pm.cleanTicker.C {
beforeCount := pm.pool.Active()
pm.pool.Clean()
afterCount := pm.pool.Active()
cleaned := beforeCount - afterCount
pm.logger.Printf("Pool cleanup: removed %d idle connections, %d remaining",
cleaned, afterCount)
// Alert if too many connections were cleaned
if cleaned > beforeCount/2 && beforeCount > 0 {
pm.logger.Printf("WARNING: Cleaned more than 50%% of connections")
}
}
}()
}
func (pm *PoolManager) stopPeriodicCleanup() {
if pm.cleanTicker != nil {
pm.cleanTicker.Stop()
}
}
type PoolManager struct {
pool *pool.Pool
metrics *metrics.Registry
logger *log.Logger
}
func (pm *PoolManager) getConnectionWithRetry(maxRetries int) (string, net.Conn, error) {
for i := 0; i < maxRetries; i++ {
id, conn, err := pm.pool.ServerGet(5 * time.Second)
if err == nil && conn != nil {
return id, conn, nil
}
// Log and track the error
pm.logger.Printf("Connection attempt %d failed: %v", i+1, err)
pm.pool.AddError()
// Exponential backoff
time.Sleep(time.Duration(math.Pow(2, float64(i))) * time.Second)
}
return "", nil, errors.New("max retries exceeded")
}
// Monitor pool health periodically
func (pm *PoolManager) healthCheck() {
ticker := time.NewTicker(30 * time.Second)
defer ticker.Stop()
for range ticker.C {
active := pm.pool.Active()
capacity := pm.pool.Capacity()
errors := pm.pool.ErrorCount()
pm.logger.Printf("Pool health: %d/%d active, %d errors", active, capacity, errors)
// Reset error count periodically
if errors > 100 {
pm.pool.ResetError()
}
// Alert if pool utilization is consistently high
if float64(active)/float64(capacity) > 0.9 {
pm.logger.Printf("WARNING: Pool utilization high (%d/%d)", active, capacity)
}
}
}
// Production setup with proper TLS
func createProductionPool() *pool.Pool {
return pool.NewClientPool(
20, 100, // Production-scale capacity
500*time.Millisecond, 5*time.Second,
30*time.Second,
"2", // Always use verified TLS in production
"secure-api.company.com", // Proper hostname for certificate verification
createSecureDialer(),
)
}
func createSecureDialer() func() (net.Conn, error) {
return func() (net.Conn, error) {
dialer := &net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 30 * time.Second,
}
return dialer.Dial("tcp", "secure-api.company.com:443")
}
}
func (app *Application) Shutdown(ctx context.Context) error {
// Stop accepting new requests first
app.server.Shutdown(ctx)
// Allow existing connections to complete
select {
case <-time.After(30 * time.Second):
app.logger.Println("Forcing pool shutdown after timeout")
case <-ctx.Done():
}
// Close all pool connections
app.clientPool.Close()
app.serverPool.Close()
return nil
}
// ANTI-PATTERN: Creating pools repeatedly
func badHandler(w http.ResponseWriter, r *http.Request) {
// DON'T: Create a new pool for each request
pool := pool.NewClientPool(5, 10, time.Second, time.Second, 30*time.Second, "2", "api.com", dialer)
defer pool.Close()
}
// GOOD PATTERN: Reuse pools
type Server struct {
apiPool *pool.Pool // Shared pool instance
}
func (s *Server) goodHandler(w http.ResponseWriter, r *http.Request) {
// DO: Reuse existing pool
id, conn, err := s.apiPool.ServerGet(10 * time.Second)
if err != nil {
// Handle error
http.Error(w, "Service unavailable", http.StatusServiceUnavailable)
return
}
if conn != nil {
defer s.apiPool.Put(id, conn)
// Use connection...
}
}
// High-throughput, low-latency services
highThroughputPool := pool.NewClientPool(
50, 200, // Large pool for many concurrent connections
100*time.Millisecond, 1*time.Second, // Fast connection creation
15*time.Second, // Short keep-alive for quick failure detection
"2", "fast-api.com", dialer,
)
// Batch processing, memory-constrained services
batchPool := pool.NewClientPool(
5, 20, // Smaller pool to conserve memory
2*time.Second, 10*time.Second, // Slower connection creation
60*time.Second, // Longer keep-alive for stable connections
"2", "batch-api.com", dialer,
)
// Development/testing setup
func createDevPool() *pool.Pool {
return pool.NewClientPool(
2, 5, // Smaller pool for development
time.Second, 3*time.Second,
30*time.Second,
"1", // Self-signed TLS acceptable for dev
"localhost", // Local development hostname
createLocalDialer(),
)
}
func TestPoolIntegration(t *testing.T) {
// Create a test server
listener, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)
defer listener.Close()
// Create server pool
serverPool := pool.NewServerPool(5, "", nil, listener, 10*time.Second)
go serverPool.ServerManager()
defer serverPool.Close()
// Create client pool
addr := listener.Addr().String()
clientPool := pool.NewClientPool(
2, 5, time.Second, 3*time.Second, 10*time.Second,
"0", // No TLS for testing
strings.Split(addr, ":")[0],
func() (net.Conn, error) { return net.Dial("tcp", addr) },
)
go clientPool.ClientManager()
defer clientPool.Close()
// Test connection flow
id, conn, err := serverPool.ServerGet(5 * time.Second)
require.NoError(t, err)
require.NotNil(t, conn)
require.NotEmpty(t, id)
// Test client get connection
clientConn, err := clientPool.ClientGet(id)
require.NoError(t, err)
require.NotNil(t, clientConn)
// Test error case - non-existent ID
_, err = clientPool.ClientGet("non-existent-id")
require.Error(t, err)
// Test timeout case
_, _, err = serverPool.ServerGet(1 * time.Millisecond)
require.Error(t, err)
// ... additional test logic
}
These best practices will help you get the most out of the pool package while maintaining reliability and performance in production environments.
Copyright (c) 2025, NodePassProject. Licensed under the BSD 3-Clause License. See the LICENSE file for details.