Redis Patterns Guide
Data Structure Selection
| Structure | Use Case | Key Commands |
|---|---|---|
| String | Cache, counters, flags, JSON blobs | GET/SET/INCR/SETEX/GETSET |
| Hash | User profiles, object attributes | HGET/HSET/HMGET/HINCRBY |
| List | Message queues, activity feeds, stacks | LPUSH/RPUSH/LPOP/LRANGE/BLPOP |
| Set | Unique visitors, tags, social graph | SADD/SMEMBERS/SINTER/SUNION/SCARD |
| Sorted Set (ZSet) | Leaderboards, rate limiting, priority queue | ZADD/ZRANGE/ZRANK/ZINCRBY |
| HyperLogLog | Approximate unique count (~0.81% standard error) | PFADD/PFCOUNT/PFMERGE |
| Stream | Event log, message broker (append-only) | XADD/XREAD/XGROUP/XACK |
| Bitmap | Daily active users, feature flags per user | SETBIT/GETBIT/BITCOUNT/BITOP |
Caching Patterns
# Cache-Aside (Lazy Loading) — most common
def get_user(user_id):
    """Return a user record, consulting the cache before the database.

    On a miss the row is fetched from the DB and written back to Redis
    with a 1-hour TTL so subsequent reads are served from cache.
    """
    cache_key = f"user:{user_id}"
    hit = redis.get(cache_key)
    if hit:
        # Cache hit — payload was stored as JSON.
        return json.loads(hit)
    # Cache miss: load from the source of truth, then populate the cache.
    row = db.query("SELECT * FROM users WHERE id = ?", user_id)
    redis.setex(cache_key, 3600, json.dumps(row))
    return row
# Write-Through — update cache on every write
def update_user(user_id, data):
    """Persist *data* for the user and refresh the cache in the same call."""
    db.update("users", user_id, data)
    cache_key = f"user:{user_id}"
    redis.setex(cache_key, 3600, json.dumps(data))
# Cache stampede prevention (mutex lock)
def get_with_lock(key, ttl, fetch_fn, max_wait_sec=5.0):
    """Read-through cache where only one caller rebuilds a missing value.

    On a miss, the first caller takes a short-lived mutex and runs
    fetch_fn(); concurrent callers poll the cache until the value appears
    or *max_wait_sec* elapses (returns None on timeout — same as a failed
    single retry in the naive version, but far less likely).
    """
    val = redis.get(key)
    if val:
        return val
    lock_key = f"lock:{key}"
    # NX+EX makes the acquire atomic; ex=10 bounds the lock if this
    # process dies mid-fetch so the key cannot stay locked forever.
    if redis.set(lock_key, 1, nx=True, ex=10):
        try:
            val = fetch_fn()
            redis.setex(key, ttl, val)
            return val
        finally:
            # NOTE(review): an unconditional delete can release a lock
            # that expired and was re-acquired by another client; for
            # strict ownership use the unique-token + Lua release pattern.
            redis.delete(lock_key)
    # Another worker holds the lock: poll until it populates the cache.
    # (The original slept once and did a single GET, which could return
    # None whenever the fetch took longer than 0.1s.)
    deadline = time.monotonic() + max_wait_sec
    while time.monotonic() < deadline:
        time.sleep(0.1)
        val = redis.get(key)
        if val:
            return val
    return None  # timed out; caller may fall back to fetch_fn() directly
Leaderboard & Rate Limiting
# Leaderboard with Sorted Set
# Scores are 64-bit floats; members are arbitrary strings (user ids here).
# NOTE: since Redis 6.2, ZREVRANGE is deprecated in favor of
# `ZRANGE key start stop REV [WITHSCORES]` — it still works everywhere.
ZADD leaderboard:weekly 1500 "user:42" # set score
ZINCRBY leaderboard:weekly 100 "user:42" # increment score
ZREVRANK leaderboard:weekly "user:42" # rank (0-indexed)
ZREVRANGE leaderboard:weekly 0 9 WITHSCORES # top 10
# Sliding window rate limiting (Sorted Set of request timestamps)
def is_rate_limited(user_id, max_requests=100, window_sec=60):
    """Return True if *user_id* exceeded *max_requests* in the last
    *window_sec* seconds (sliding window over a Sorted Set).

    Each request is a ZSet member scored by its timestamp; members older
    than the window are pruned before counting.
    """
    key = f"ratelimit:{user_id}"
    now = time.time()
    pipe = redis.pipeline()
    pipe.zremrangebyscore(key, 0, now - window_sec)  # remove old entries
    # The member must be unique per request: with plain str(now), two
    # requests landing on the same float timestamp collapse into one
    # entry (ZADD overwrites), undercounting bursts.
    member = f"{now}:{uuid.uuid4().hex}"
    pipe.zadd(key, {member: now})  # add current request
    pipe.zcard(key)  # count entries still inside the window
    pipe.expire(key, window_sec)  # GC keys for idle users
    _, _, count, _ = pipe.execute()
    return count > max_requests
# Fixed-window counter (NOT a token bucket — simpler, but permits up to
# 2x the limit in a burst straddling a window boundary)
def check_limit(key, limit, window):
    """Return True if *key* exceeded *limit* calls in the current fixed
    window of *window* seconds."""
    count = redis.incr(key)
    if count == 1:
        redis.expire(key, window)
    elif redis.ttl(key) == -1:
        # Recover from a crash between INCR and EXPIRE on an earlier
        # call: without a TTL the counter never resets and the key
        # rate-limits forever.
        redis.expire(key, window)
    return count > limit
Distributed Lock (Redlock)
# Single-instance lock (simpler, not HA)
# Safe only while this single Redis instance stays up; for multi-node
# fault tolerance see the Redlock algorithm.
lock_key = "lock:resource:42"
lock_val = str(uuid.uuid4()) # unique value to prevent wrong release
# Acquire: SET NX + EX (atomic)
# nx=True -> only set if absent; ex=30 -> auto-expire so a crashed
# holder cannot deadlock the resource forever.
acquired = redis.set(lock_key, lock_val, nx=True, ex=30)
# Release: only if we own it (Lua for atomicity)
# GET + compare + DEL must run as one atomic step: a plain DEL could
# remove a lock that expired and was re-acquired by another client.
RELEASE_SCRIPT = """
if redis.call('get', KEYS[1]) == ARGV[1] then
return redis.call('del', KEYS[1])
else
return 0
end
"""
# redis-py eval signature: eval(script, numkeys, *keys_and_args)
redis.eval(RELEASE_SCRIPT, 1, lock_key, lock_val)
# Session storage pattern
def create_session(user_id):
    """Create a server-side session for *user_id* and return its token.

    The token is 64 hex chars from a CSPRNG; the payload lives in Redis
    under session:<token> and expires after 24 hours.
    """
    token = secrets.token_hex(32)
    payload = {"user_id": user_id, "created": time.time()}
    redis.setex(f"session:{token}", 86400, json.dumps(payload))  # 24h TTL
    return token