🌐 Shared Cache
Distributed caching provides shared cache accessible by all application servers.
When your application runs on one server, caching is simple - just use local memory. But what happens when you scale to multiple servers?
The problem: Each server has its own cache. Data cached on Server 1 isn’t available on Server 2. You’re wasting memory and getting inconsistent results.
The solution: Distributed caching - a shared cache accessible by all servers.
Distributed caching means using a cache that runs on separate servers and is shared by all your application servers.
| Benefit | Description |
|---|---|
| Shared Cache | All servers see the same cached data |
| Larger Capacity | Sum of all cache servers, not limited to one machine |
| High Availability | Cache survives individual server failures |
| Consistency | Updates visible to all servers immediately |
| Memory Efficiency | One copy instead of N copies |
The two most popular distributed caching solutions:
Redis: Feature-rich in-memory data store. More than just a cache.
Redis Strengths:
Redis Use Cases:
Memcached: Simple, fast key-value store. Pure caching solution.
Memcached Strengths:
Memcached Use Cases:
| Feature | Redis | Memcached |
|---|---|---|
| Data Structures | Rich (strings, lists, sets, etc.) | Key-value only |
| Persistence | Yes (RDB, AOF) | No |
| Performance | Fast | Faster (simpler) |
| Memory Efficiency | Higher overhead | Lower overhead |
| Replication | Built-in | None (data is distributed via client-side sharding) |
| Use Case | Feature-rich caching | Simple caching |
Problem: When Server 1 updates cache, how do Servers 2 and 3 know?
Solutions:
Write-Through to Shared Cache
Cache Invalidation
Short TTL
At the code level, you need to design cache client wrappers that abstract Redis/Memcached:
from abc import ABC, abstractmethodfrom typing import Optional, Anyimport redisimport memcache
class CacheClient(ABC):
    """Backend-agnostic cache interface, implemented for Redis and Memcached."""

    @abstractmethod
    def get(self, key: str) -> Optional[Any]:
        """Return the cached value for *key*, or None on a miss."""
        ...

    @abstractmethod
    def set(self, key: str, value: Any, ttl: int = 300) -> bool:
        """Store *value* under *key* for *ttl* seconds; True on success."""
        ...

    @abstractmethod
    def delete(self, key: str) -> bool:
        """Remove *key* from the cache; True if something was deleted."""
        ...
class RedisCacheClient(CacheClient):
    """CacheClient backed by a Redis server."""

    def __init__(self, host: str = 'localhost', port: int = 6379):
        # decode_responses=True makes get() return str rather than bytes
        self.client = redis.Redis(host=host, port=port, decode_responses=True)

    def get(self, key: str) -> Optional[Any]:
        """Fetch *key*; a Redis failure degrades to a cache miss."""
        try:
            return self.client.get(key)
        except redis.RedisError:
            # Handle failure gracefully - an unreachable cache is just a miss
            return None

    def set(self, key: str, value: Any, ttl: int = 300) -> bool:
        """Store *value* with an expiry of *ttl* seconds; False on failure."""
        try:
            # SETEX writes the value and its TTL in one atomic command
            return self.client.setex(key, ttl, value)
        except redis.RedisError:
            return False

    def delete(self, key: str) -> bool:
        """Remove *key*; True when at least one key was deleted."""
        try:
            # DEL returns the number of keys removed
            return bool(self.client.delete(key))
        except redis.RedisError:
            return False
class MemcachedCacheClient(CacheClient):
    """CacheClient backed by one or more Memcached servers."""

    def __init__(self, servers: Optional[list] = None):
        """Connect to *servers* ('host:port' strings); defaults to a local
        Memcached instance when omitted."""
        # Fixed annotation: the parameter accepts None, so it is
        # Optional[list], not a plain list.
        self.client = memcache.Client(servers or ['127.0.0.1:11211'])

    def get(self, key: str) -> Optional[Any]:
        """Fetch *key*; any client error degrades to a cache miss."""
        try:
            return self.client.get(key)
        except Exception:
            # python-memcache raises library-specific errors; fail soft
            return None

    def set(self, key: str, value: Any, ttl: int = 300) -> bool:
        """Store *value* for *ttl* seconds; False on any client error."""
        try:
            return self.client.set(key, value, time=ttl)
        except Exception:
            return False

    def delete(self, key: str) -> bool:
        """Remove *key*; False on any client error."""
        try:
            return self.client.delete(key)
        except Exception:
            return False
# Usage - application code doesn't care about implementation
class UserService:
    """Demonstrates the cache-aside pattern on top of any CacheClient."""

    def __init__(self, cache: CacheClient):
        self.cache = cache

    def get_user(self, user_id: int):
        """Return the user, preferring the shared cache over the database."""
        # Cache-aside pattern: 1) look in cache, 2) fall back to DB, 3) refill
        cache_key = f"user:{user_id}"
        cached = self.cache.get(cache_key)
        if cached:
            return cached

        # Cache miss - fetch from DB
        user = self._fetch_from_db(user_id)
        if user:
            # Refill the cache so the next lookup (on any server) hits
            self.cache.set(cache_key, user, ttl=300)
        return user

import java.util.Optional;
interface CacheClient { Optional<String> get(String key); boolean set(String key, String value, int ttlSeconds); boolean delete(String key);}
class RedisCacheClient implements CacheClient { private final Jedis jedis;
public RedisCacheClient(String host, int port) { this.jedis = new Jedis(host, port); }
public Optional<String> get(String key) { try { String value = jedis.get(key); return Optional.ofNullable(value); } catch (Exception e) { // Handle failure gracefully return Optional.empty(); } }
public boolean set(String key, String value, int ttlSeconds) { try { return "OK".equals(jedis.setex(key, ttlSeconds, value)); } catch (Exception e) { return false; } }
public boolean delete(String key) { try { return jedis.del(key) > 0; } catch (Exception e) { return false; } }}
/** CacheClient backed by a Memcached server. */
class MemcachedCacheClient implements CacheClient {

    private final MemcachedClient client;

    public MemcachedCacheClient(String host, int port) {
        this.client = new MemcachedClient(
                new InetSocketAddress(host, port));
    }

    /** Fetch key; any client error degrades to a cache miss (empty). */
    public Optional<String> get(String key) {
        try {
            return Optional.ofNullable((String) client.get(key));
        } catch (Exception e) {
            return Optional.empty();
        }
    }

    /** Store value for ttlSeconds; false on any client error. */
    public boolean set(String key, String value, int ttlSeconds) {
        try {
            // set() is asynchronous; get() blocks until the result is known
            return client.set(key, ttlSeconds, value).get();
        } catch (Exception e) {
            return false;
        }
    }

    /** Remove key; false on any client error. */
    public boolean delete(String key) {
        try {
            return client.delete(key).get();
        } catch (Exception e) {
            return false;
        }
    }
}
// Usage - application code doesn't care about implementation
class UserService {

    private final CacheClient cache;

    public UserService(CacheClient cache) {
        this.cache = cache;
    }

    /** Cache-aside: check the shared cache first, then the database. */
    public Optional<User> getUser(int userId) {
        String cacheKey = "user:" + userId;
        Optional<String> cached = cache.get(cacheKey);
        if (cached.isPresent()) {
            return Optional.of(deserialize(cached.get()));
        }

        // Cache miss - fetch from DB
        Optional<User> user = fetchFromDb(userId);

        // Store in cache so the next lookup (on any server) hits
        user.ifPresent(u -> cache.set(cacheKey, serialize(u), 300));

        return user;
    }
}

Important: Don’t create new connections for each request. Use connection pooling:
import redis
from redis.connection import ConnectionPool

# Create connection pool
pool = ConnectionPool(
    host='localhost',
    port=6379,
    max_connections=50,  # Max connections in pool
    decode_responses=True,
)

# Reuse pool across requests
class CacheService:
    """Thin cache wrapper that shares one Redis connection pool."""

    def __init__(self):
        # Redis() bound to a pool reuses sockets instead of reconnecting
        self.redis = redis.Redis(connection_pool=pool)

    def get(self, key: str):
        return self.redis.get(key)

import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
// Create connection pool
JedisPoolConfig poolConfig = new JedisPoolConfig();
poolConfig.setMaxTotal(50);  // Max connections
poolConfig.setMaxIdle(10);   // Max idle connections

JedisPool pool = new JedisPool(poolConfig, "localhost", 6379);

// Reuse pool across requests
class CacheService {

    private final JedisPool pool;

    public CacheService(JedisPool pool) {
        this.pool = pool;
    }

    public String get(String key) {
        // try-with-resources returns the connection to the pool when done
        try (Jedis jedis = pool.getResource()) {
            return jedis.get(key);
        }
    }
}

Handle cache failures gracefully:
import time
from typing import Callable, Optional, Any

class CacheWithRetry:
    """Wraps a CacheClient with bounded retries and exponential backoff."""

    def __init__(self, cache: CacheClient, max_retries: int = 3):
        self.cache = cache
        self.max_retries = max_retries

    def get_with_retry(self, key: str) -> Optional[Any]:
        """Try get() up to max_retries times; give up by reporting a miss."""
        for attempt in range(self.max_retries):
            try:
                return self.cache.get(key)
            except Exception:
                if attempt == self.max_retries - 1:
                    # Last attempt failed - return None (cache miss)
                    return None
                # Exponential backoff: 1s, 2s, 4s, ...
                time.sleep(2 ** attempt)
        return None

import java.util.Optional;
import java.util.function.Supplier;
class CacheWithRetry { private final CacheClient cache; private final int maxRetries;
public CacheWithRetry(CacheClient cache, int maxRetries) { this.cache = cache; this.maxRetries = maxRetries; }
public Optional<String> getWithRetry(String key) { for (int attempt = 0; attempt < maxRetries; attempt++) { try { return cache.get(key); } catch (Exception e) { if (attempt == maxRetries - 1) { // Last attempt failed - return empty (cache miss) return Optional.empty(); } // Exponential backoff try { Thread.sleep((long) Math.pow(2, attempt) * 1000); } catch (InterruptedException ie) { Thread.currentThread().interrupt(); return Optional.empty(); } } } return Optional.empty(); }}For very large caches, shard data across multiple cache nodes:
Sharding Strategy:
🌐 Shared Cache
Distributed caching provides shared cache accessible by all application servers.
🔴 Redis vs Memcached
Redis = feature-rich, Memcached = simple and fast. Choose based on needs.
🏗️ Abstract Implementation
Design cache interfaces that abstract Redis/Memcached. Makes switching easier.
🔌 Connection Pooling
Always use connection pooling. Don’t create connections per request.