WebSocket Performance Fundamentals
WebSockets provide full-duplex communication channels over a single TCP connection, enabling real-time data exchange. For QA engineers, testing WebSocket performance (see also Gatling: High-Performance Load Testing with Scala DSL) requires specialized tools and techniques different from traditional HTTP testing.
Key Performance Metrics
- Connection Time - Time to establish WebSocket connection
- Message Latency - Round-trip time for messages
- Throughput - Messages per second (MPS)
- Concurrent Connections - Maximum simultaneous connections
- Message Loss Rate - Percentage of dropped messages
- Memory Usage - Per-connection memory footprint
Testing WebSocket Connections
Artillery WebSocket Load Testing
# websocket-test.yml
# Artillery load test for a Socket.IO chat application:
# ramps up new connections, then has each virtual user join a room,
# send 100 messages, and leave.
config:
  target: "ws://localhost:3000"
  phases:
    - duration: 60
      arrivalRate: 10 # 10 new connections per second
      name: "Warm up"
    - duration: 300
      arrivalRate: 50 # Peak load
      name: "Peak load"
  engines:
    socketio:
      # Force raw WebSocket transport (skip HTTP long-polling fallback).
      transports: ["websocket"]

scenarios:
  - name: "Chat application"
    engine: "socketio"
    flow:
      - emit:
          channel: "join"
          data:
            room: "general"
            username: "user_{{ $uuid }}"
      - think: 2
      # Each virtual user sends 100 messages, pausing 1s between them.
      - loop:
          - emit:
              channel: "message"
              data:
                text: "Load test message {{ $randomString() }}"
          - think: 1
        count: 100
      - emit:
          channel: "leave"
# Run test
artillery run websocket-test.yml
# Results include:
# - Successful connections
# - Failed connections
# - Message latency (p50, p95, p99)
# - Errors
K6 WebSocket Testing
// k6-websocket-test.js
// k6 WebSocket load test: each VU opens one connection, sends a timestamped
// ping every second, and records round-trip latency from the matching pong.
import ws from 'k6/ws';
import { check } from 'k6';
import { Counter, Trend } from 'k6/metrics';

// Custom metrics reported alongside k6's built-ins.
const messagesSent = new Counter('messages_sent');
const messagesReceived = new Counter('messages_received');
const messageLatency = new Trend('message_latency');

export let options = {
  stages: [
    { duration: '30s', target: 100 }, // Ramp up to 100 users
    { duration: '1m', target: 100 },  // Stay at 100
    { duration: '30s', target: 500 }, // Ramp to 500
    { duration: '2m', target: 500 },  // Hold at 500
    { duration: '30s', target: 0 },   // Ramp down
  ],
};

export default function () {
  const url = 'ws://localhost:3000/socket';
  const params = { tags: { my_tag: 'websocket' } };

  const res = ws.connect(url, params, function (socket) {
    socket.on('open', () => {
      console.log('Connected');
      // Send a ping once per second; the embedded timestamp lets us
      // compute round-trip latency when the matching pong arrives.
      socket.setInterval(() => {
        const timestamp = Date.now();
        const message = JSON.stringify({
          type: 'ping',
          timestamp: timestamp,
          data: 'test',
        });
        socket.send(message);
        messagesSent.add(1);
      }, 1000);
    });

    socket.on('message', (data) => {
      const response = JSON.parse(data);
      if (response.type === 'pong') {
        // Round-trip latency: now minus the timestamp we sent.
        const latency = Date.now() - response.timestamp;
        messageLatency.add(latency);
        messagesReceived.add(1);
      }
    });

    socket.on('close', () => {
      console.log('Disconnected');
    });

    socket.on('error', (e) => {
      console.log('Error: ' + e.error());
    });

    // Close each connection after 60s so VUs can be recycled.
    socket.setTimeout(() => {
      socket.close();
    }, 60000);
  });

  // HTTP 101 Switching Protocols confirms the WebSocket upgrade succeeded.
  check(res, { 'status is 101': (r) => r && r.status === 101 });
}
# Run k6 test
k6 run k6-websocket-test.js
# Output includes:
# ✓ Connected successfully
# ✓ Message latency < 100ms
# messages_sent......: 50000
# messages_received..: 49998
# message_latency....: avg=45ms p(95)=78ms
Custom WebSocket Test Client
Node.js Load Test
// websocket-load-test.js
const WebSocket = require('ws');
const { performance } = require('perf_hooks');
/**
 * Drives a WebSocket load test: opens N concurrent connections, has each
 * send timestamped messages at a fixed rate, and reports connection time,
 * latency percentiles, message loss, and errors.
 */
class WebSocketLoadTest {
  /**
   * @param {string} url - WebSocket endpoint to test.
   * @param {object} [options]
   * @param {number} [options.connections=100] - Concurrent connections to open.
   * @param {number} [options.messageRate=10] - Messages per second per connection.
   * @param {number} [options.duration=60000] - Test duration in milliseconds.
   */
  constructor(url, options = {}) {
    this.url = url;
    this.concurrentConnections = options.connections || 100;
    this.messageRate = options.messageRate || 10; // messages/sec per connection
    this.duration = options.duration || 60000; // ms
    this.connections = [];
    this.metrics = {
      connectionTime: [],
      messageLatency: [],
      messagesSent: 0,
      messagesReceived: 0,
      errors: 0,
    };
  }

  /** Open all connections, hold load for the configured duration, then report. */
  async run() {
    console.log(`Starting load test:`);
    console.log(`- Connections: ${this.concurrentConnections}`);
    console.log(`- Message rate: ${this.messageRate}/sec`);
    console.log(`- Duration: ${this.duration}ms`);
    // Each createConnection resolves once its socket opens (or errors),
    // so the measured duration starts only after ramp-up completes.
    for (let i = 0; i < this.concurrentConnections; i++) {
      await this.createConnection(i);
    }
    // Run for specified duration
    await new Promise((resolve) => setTimeout(resolve, this.duration));
    // Cleanup
    this.connections.forEach((ws) => ws.close());
    // Report results
    this.reportMetrics();
  }

  /**
   * Open one instrumented connection.
   * Returns a promise that settles when the socket opens or errors, so
   * callers can genuinely await ramp-up (the original async method awaited
   * nothing and resolved before the connection was established).
   * @param {number} id - Connection identifier echoed in each message.
   */
  createConnection(id) {
    return new Promise((resolve) => {
      const startTime = performance.now();
      const ws = new WebSocket(this.url);

      ws.on('open', () => {
        const connectionTime = performance.now() - startTime;
        this.metrics.connectionTime.push(connectionTime);
        // Send at the configured per-connection rate until the socket closes.
        const interval = setInterval(() => {
          if (ws.readyState === WebSocket.OPEN) {
            const timestamp = Date.now();
            ws.send(JSON.stringify({ id, timestamp, data: 'test' }));
            this.metrics.messagesSent++;
          } else {
            clearInterval(interval);
          }
        }, 1000 / this.messageRate);
        resolve();
      });

      ws.on('message', (data) => {
        const message = JSON.parse(data);
        // Latency assumes the server echoes back our timestamp field.
        const latency = Date.now() - message.timestamp;
        this.metrics.messageLatency.push(latency);
        this.metrics.messagesReceived++;
      });

      ws.on('error', (error) => {
        console.error(`Connection ${id} error:`, error.message);
        this.metrics.errors++;
        resolve(); // Don't hang ramp-up on a failed connection.
      });

      this.connections.push(ws);
    });
  }

  /** Print aggregate results to stdout. */
  reportMetrics() {
    const avgConnectionTime = this.average(this.metrics.connectionTime);
    const avgLatency = this.average(this.metrics.messageLatency);
    const p95Latency = this.percentile(this.metrics.messageLatency, 0.95);
    const p99Latency = this.percentile(this.metrics.messageLatency, 0.99);
    // Guard against division by zero (NaN) when nothing was sent.
    const messageLoss = this.metrics.messagesSent > 0
      ? (((this.metrics.messagesSent - this.metrics.messagesReceived) /
          this.metrics.messagesSent) * 100).toFixed(2)
      : '0.00';
    console.log('\n=== Test Results ===');
    console.log(`Connections established: ${this.connections.length}`);
    console.log(`Average connection time: ${avgConnectionTime.toFixed(2)}ms`);
    console.log(`Messages sent: ${this.metrics.messagesSent}`);
    console.log(`Messages received: ${this.metrics.messagesReceived}`);
    console.log(`Message loss rate: ${messageLoss}%`);
    console.log(`Average latency: ${avgLatency.toFixed(2)}ms`);
    console.log(`P95 latency: ${p95Latency.toFixed(2)}ms`);
    console.log(`P99 latency: ${p99Latency.toFixed(2)}ms`);
    console.log(`Errors: ${this.metrics.errors}`);
  }

  /** Arithmetic mean; 0 for an empty array. */
  average(arr) {
    return arr.length > 0 ? arr.reduce((a, b) => a + b, 0) / arr.length : 0;
  }

  /** Nearest-rank percentile (p in [0,1]); 0 for an empty array. */
  percentile(arr, p) {
    if (arr.length === 0) return 0;
    const sorted = arr.slice().sort((a, b) => a - b);
    const index = Math.ceil(sorted.length * p) - 1;
    return sorted[index];
  }
}
// Launch the load test: 500 concurrent clients, 10 msg/s each, for 60 seconds.
const test = new WebSocketLoadTest('ws://localhost:3000', {
  connections: 500,
  messageRate: 10,
  duration: 60000,
});
test.run().catch(console.error);
Testing WebSocket Scalability
Horizontal Scaling with Redis
// Server with Redis pub/sub for horizontal scaling:
// every instance publishes incoming messages to Redis and rebroadcasts
// anything it receives on the channel to its own connected clients.
const express = require('express');
// Import WebSocket as well: it provides the OPEN readyState constant used
// below (the original only imported WebSocketServer, so WebSocket.OPEN
// threw a ReferenceError on the first broadcast).
const { WebSocketServer, WebSocket } = require('ws');
const redis = require('redis');

const app = express();
const server = app.listen(3000);
const wss = new WebSocketServer({ server });

// Separate Redis clients: a connection in subscriber mode cannot publish.
const publisher = redis.createClient();
const subscriber = redis.createClient();

subscriber.subscribe('messages');
subscriber.on('message', (channel, message) => {
  // Broadcast to all connected clients on this server instance
  wss.clients.forEach((client) => {
    if (client.readyState === WebSocket.OPEN) {
      client.send(message);
    }
  });
});

wss.on('connection', (ws) => {
  ws.on('message', (data) => {
    // Publish to Redis so every other server instance rebroadcasts it too.
    publisher.publish('messages', data);
  });
});

// Test with multiple server instances
// 1. Start server on port 3000
// 2. Start server on port 3001
// 3. Load balance with nginx/haproxy
// 4. Test message delivery across instances
Nginx Load Balancing
# nginx.conf
# Load-balances WebSocket traffic across three local backend instances.
upstream websocket_backend {
least_conn; # Or ip_hash for sticky sessions
server localhost:3000;
server localhost:3001;
server localhost:3002;
}
server {
listen 80;
location /socket {
proxy_pass http://websocket_backend;
# HTTP/1.1 is required for the Upgrade handshake (1.0 has no Upgrade).
proxy_http_version 1.1;
# Forward the client's Upgrade request and force the hop-by-hop
# Connection header to "upgrade" so the backend completes the handshake.
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# Timeout settings
# 7d keeps idle long-lived sockets from being cut by the proxy.
# NOTE(review): consider shorter timeouts plus app-level heartbeats
# so dead connections don't linger for a week.
proxy_connect_timeout 7d;
proxy_send_timeout 7d;
proxy_read_timeout 7d;
}
}
Monitoring WebSocket Performance
Prometheus Metrics
// WebSocket server with Prometheus metrics.
// NOTE: `wss`, `app`, and `handleMessage` are defined elsewhere in the server.
const promClient = require('prom-client');
// Define metrics
// Gauge: rises on connection, falls on close.
const activeConnections = new promClient.Gauge({
name: 'websocket_active_connections',
help: 'Number of active WebSocket connections'
});
const messagesSent = new promClient.Counter({
name: 'websocket_messages_sent_total',
help: 'Total WebSocket messages sent'
});
const messagesReceived = new promClient.Counter({
name: 'websocket_messages_received_total',
help: 'Total WebSocket messages received'
});
// Buckets span 1ms to 5s; observed values are server-side processing
// durations (see the handler below), not network round-trip latency.
const messageLatency = new promClient.Histogram({
name: 'websocket_message_latency_seconds',
help: 'WebSocket message latency',
buckets: [0.001, 0.01, 0.1, 0.5, 1, 2, 5]
});
wss.on('connection', (ws) => {
activeConnections.inc();
ws.on('message', (data) => {
// Timer starts at message receipt and stops after the reply is queued,
// so this histogram measures handler processing time on the server.
const start = Date.now();
messagesReceived.inc();
// Process message
const response = handleMessage(data);
ws.send(response);
messagesSent.inc();
const duration = (Date.now() - start) / 1000;
messageLatency.observe(duration);
});
ws.on('close', () => {
activeConnections.dec();
});
});
// Metrics endpoint
// Exposes the default registry in Prometheus text format for scraping.
app.get('/metrics', async (req, res) => {
res.set('Content-Type', promClient.register.contentType);
res.end(await promClient.register.metrics());
});
Best Practices
Connection Management
// Liveness tracking: sockets that fail to answer a ping get terminated.
function heartbeat() {
  // `this` is the ws instance whose 'pong' listener fired.
  this.isAlive = true;
}

wss.on('connection', (ws) => {
  ws.isAlive = true;
  ws.on('pong', heartbeat);
});

// Every 30s: terminate sockets that never answered the previous ping,
// then mark the survivors as pending and ping them again.
const interval = setInterval(() => {
  for (const ws of wss.clients) {
    if (ws.isAlive === false) {
      ws.terminate();
      continue;
    }
    ws.isAlive = false;
    ws.ping();
  }
}, 30000);

// Stop the ping loop when the server shuts down.
wss.on('close', () => clearInterval(interval));
Backpressure Handling
// Throttle outbound writes when the socket's send buffer backs up.
ws.on('message', async (data) => {
// Check buffer before sending
// bufferedAmount = bytes queued on this socket but not yet flushed to TCP.
if (ws.bufferedAmount > 1024 * 1024) { // 1MB
console.log('Backpressure detected, slowing down');
// NOTE(review): a single fixed 100ms pause may not drain the buffer;
// consider re-checking bufferedAmount in a loop before sending.
await new Promise(resolve => setTimeout(resolve, 100));
}
// NOTE(review): `processedData` is assumed to be derived from `data`
// elsewhere — it is not defined in this snippet; confirm against the
// full handler.
ws.send(processedData);
});
Conclusion
WebSocket performance testing requires specialized approaches for real-time communication. By using appropriate load-testing tools (see also Performance Testing: From Load to Stress Testing), implementing proper monitoring, and following best practices for scaling, QA engineers can ensure WebSocket applications perform reliably under load.
Key Takeaways:
- Test connection establishment, message throughput, and latency
- Use tools like Artillery, k6, or custom clients for load testing
- Implement horizontal scaling with message brokers (Redis, RabbitMQ)
- Monitor with Prometheus metrics and Grafana dashboards
- Handle backpressure and dead connections properly
- Test failover and reconnection strategies