Firestore Real-Time Architecture
Firestore vs Traditional Databases
| Feature | Firestore | SQL (PostgreSQL) |
|---|---|---|
| Schema | Flexible (schema-less) | Rigid (schema required) |
| Scaling | Auto (millions of connections) | Manual (vertical/horizontal) |
| Real-time | Built-in listeners | Polling or triggers |
| Offline | Native support | Not available |
| Queries | Limited (index required) | Flexible (JOINs, subqueries) |
| Transactions | Document-level ACID | Multi-row ACID |
| Cost | Per operation + storage | Per instance + storage |
Data Modeling: SQL vs NoSQL Mindset
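Coming from SQL, the instinct is to normalize data into separate tables and join them at query time. Firestore has no JOINs, so data is modeled around how it is read: related fields are nested inside a document, unbounded lists become subcollections, and small fields are duplicated (denormalized) when that saves reads. A minimal sketch of that mindset for a chat app; the collection and field names are illustrative, not a required schema:
// SQL mindset: users, rooms, and messages tables joined at read time
// Firestore mindset: each document carries what one screen needs in a single read

// users/{userId}
const userDoc = {
  name: "John Doe",
  email: "john@example.com",
  premium: false
};

// rooms/{roomId}: member names duplicated so the room list renders without extra lookups
const roomDoc = {
  title: "General",
  memberNames: ["John Doe", "Jane Smith"]
};

// rooms/{roomId}/messages/{messageId}: an unbounded list stored as a subcollection
const messageDoc = {
  text: "Hello!",
  senderId: "user123",
  senderName: "John Doe", // duplicated to avoid a second, join-style read
  createdAt: new Date()
};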
CRUD Operations (JavaScript)
import {
collection, doc, getDoc, getDocs, addDoc, setDoc,
updateDoc, deleteDoc, query, where, orderBy, limit
} from "firebase/firestore";
import { db } from "./firebase-config";
// CREATE - Add document with auto-generated ID
const usersRef = collection(db, "users");
const docRef = await addDoc(usersRef, {
name: "John Doe",
email: "john@example.com",
createdAt: new Date(),
premium: false
});
console.log("Created user:", docRef.id);
// CREATE - Set document with custom ID
const userDocRef = doc(db, "users", "user123");
await setDoc(userDocRef, {
name: "Jane Smith",
email: "jane@example.com"
});
// READ - Get single document
const docSnap = await getDoc(userDocRef);
if (docSnap.exists()) {
console.log("User data:", docSnap.data());
} else {
console.log("No such document!");
}
// READ - Query with filters
const q = query(
usersRef,
where("premium", "==", true),
orderBy("createdAt", "desc"),
limit(10)
);
const querySnapshot = await getDocs(q);
querySnapshot.forEach((doc) => {
console.log(doc.id, " => ", doc.data());
});
// UPDATE - Partial update
await updateDoc(userDocRef, {
premium: true,
upgradeDate: new Date()
});
// DELETE
await deleteDoc(userDocRef);
Real-Time Listeners
import { onSnapshot } from "firebase/firestore";
// Listen to single document
const unsubscribe = onSnapshot(userDocRef, (doc) => {
if (doc.exists()) {
console.log("Current data:", doc.data());
// Update UI with new data
updateUserProfile(doc.data());
}
});
// Listen to query results
const messagesQuery = query(
collection(db, "messages"),
where("roomId", "==", "room123"),
orderBy("createdAt", "desc"),
limit(50)
);
const unsubscribeMessages = onSnapshot(messagesQuery, (snapshot) => {
snapshot.docChanges().forEach((change) => {
if (change.type === "added") {
console.log("New message:", change.doc.data());
addMessageToUI(change.doc.data());
}
if (change.type === "modified") {
console.log("Modified message:", change.doc.data());
updateMessageInUI(change.doc.id, change.doc.data());
}
if (change.type === "removed") {
console.log("Removed message:", change.doc.data());
removeMessageFromUI(change.doc.id);
}
});
});
// Cleanup listeners when updates are no longer needed (e.g. on unmount)
unsubscribe();
unsubscribeMessages();
Transactions & Batch Writes
import { runTransaction, writeBatch } from "firebase/firestore";
// TRANSACTION - Transfer credits between users
try {
await runTransaction(db, async (transaction) => {
const fromRef = doc(db, "users", "user123");
const toRef = doc(db, "users", "user456");
// Read phase
const fromDoc = await transaction.get(fromRef);
const toDoc = await transaction.get(toRef);
if (!fromDoc.exists() || !toDoc.exists()) {
throw new Error("User not found");
}
const fromCredits = fromDoc.data().credits;
const amount = 100;
if (fromCredits < amount) {
throw new Error("Insufficient credits");
}
// Write phase (atomic)
transaction.update(fromRef, {
credits: fromCredits - amount
});
transaction.update(toRef, {
credits: (toDoc.data().credits || 0) + amount
});
});
console.log("Transaction successful!");
} catch (e) {
console.error("Transaction failed:", e);
}
// BATCH WRITE - Update multiple documents
const batch = writeBatch(db);
// Update multiple users
const users = ["user1", "user2", "user3"];
users.forEach(userId => {
const userRef = doc(db, "users", userId);
batch.update(userRef, { verified: true });
});
// Commit all at once (atomic)
await batch.commit();
Python Admin SDK
import firebase_admin
from firebase_admin import credentials, firestore
from datetime import datetime
# Initialize
cred = credentials.ApplicationDefault()
firebase_admin.initialize_app(cred)
db = firestore.client()
# Create
doc_ref = db.collection('users').document('user123')
doc_ref.set({
'name': 'John Doe',
'email': 'john@example.com',
'created_at': datetime.now(),
'premium': False
})
# Read
doc = doc_ref.get()
if doc.exists:
    print(f'Document data: {doc.to_dict()}')
# Query
users_ref = db.collection('users')
query = users_ref.where('premium', '==', True) \
.order_by('created_at', direction=firestore.Query.DESCENDING) \
.limit(10)
for doc in query.stream():
    print(f'{doc.id} => {doc.to_dict()}')
# Update
doc_ref.update({
'premium': True,
'upgrade_date': datetime.now()
})
# Delete
doc_ref.delete()
# Batch write
batch = db.batch()
for i in range(10):
    doc_ref = db.collection('users').document(f'user{i}')
    batch.set(doc_ref, {'verified': True})
batch.commit()
Security Rules
rules_version = '2';
service cloud.firestore {
match /databases/{database}/documents {
// Users can read/write their own document
match /users/{userId} {
allow read: if request.auth != null && request.auth.uid == userId;
allow write: if request.auth != null && request.auth.uid == userId;
}
// Public read, authenticated write
match /posts/{postId} {
allow read: if true; // Anyone can read
allow create: if request.auth != null; // Logged in users can create
allow update, delete: if request.auth != null &&
resource.data.authorId == request.auth.uid;
}
// Field-level validation
match /orders/{orderId} {
allow create: if request.auth != null &&
request.resource.data.amount > 0 &&
request.resource.data.userId == request.auth.uid;
allow read: if request.auth != null &&
resource.data.userId == request.auth.uid;
}
// Role-based access
match /admin/{document=**} {
allow read, write: if request.auth != null &&
get(/databases/$(database)/documents/users/$(request.auth.uid)).data.role == 'admin';
}
}
}
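The Rules Playground is useful for spot checks, but rules can also be exercised against the local emulator with the @firebase/rules-unit-testing package. A minimal sketch, assuming the rules above are saved as firestore.rules and the Firestore emulator is running; the project ID is an arbitrary placeholder:
import { readFileSync } from "node:fs";
import {
  initializeTestEnvironment, assertSucceeds, assertFails
} from "@firebase/rules-unit-testing";

const testEnv = await initializeTestEnvironment({
  projectId: "demo-firestore-rules", // any ID works against the emulator
  firestore: { rules: readFileSync("firestore.rules", "utf8") }
});

// Signed in as user123: reading their own user document should pass
const owner = testEnv.authenticatedContext("user123");
await assertSucceeds(owner.firestore().doc("users/user123").get());

// Signed out: the same read should be rejected by the rules
const anon = testEnv.unauthenticatedContext();
await assertFails(anon.firestore().doc("users/user123").get());

await testEnv.cleanup();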
Best Practices
- Denormalize data: Duplicate data to avoid multiple reads
- Create composite indexes: Required for multi-field queries; Firestore suggests them in error messages and the console
- Use subcollections wisely: For data belonging to single parent
- Batch writes: Up to 500 operations in single batch
- Offline persistence: Enable for mobile and web apps so data is cached and synced automatically (see the sketch after this list)
- Security rules: Test with Rules Playground before deploying
- Monitor costs: Watch read/write counts, optimize queries
- Document size limit: Keep under 1 MB (split if needed)
- Use transactions: For data that must stay consistent
- Collection group queries: Query across all subcollections with the same name, backed by a collection-group index (example after this list)
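Two of the points above in code form; a minimal sketch assuming the modular (v9+) Web SDK and an illustrative "comments" subcollection:
import {
  enableIndexedDbPersistence, collectionGroup,
  query, where, getDocs
} from "firebase/firestore";
import { db } from "./firebase-config";

// Offline persistence: call before any other Firestore usage; data is cached
// in IndexedDB and synced automatically when the connection returns
enableIndexedDbPersistence(db).catch((err) => {
  console.warn("Persistence not enabled:", err.code); // e.g. multiple open tabs
});

// Collection group query: search every subcollection named "comments" at once
// (needs a collection-group index, which Firestore suggests when the query fails)
const flaggedComments = query(
  collectionGroup(db, "comments"),
  where("flagged", "==", true)
);
const snapshot = await getDocs(flaggedComments);
snapshot.forEach((d) => console.log(d.ref.path, d.data()));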
Cost Optimization
- Minimize reads: Cache data in clients and reuse real-time listeners instead of re-fetching (see the cache-first sketch after this list)
- Use limits: Don’t fetch more data than needed
- Optimize indexes: Remove unused composite indexes
- Batch operations: Group writes to reduce write count
- Delete old data: Storage costs add up (TTL policies)
- Monitor usage: Set up billing alerts at thresholds
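For the minimize-reads point, the Web SDK can serve a document from the local cache and only pay for a server read on a cache miss. A sketch assuming offline persistence is enabled; getUserCachedFirst is a hypothetical helper, not an SDK function:
import { doc, getDocFromCache, getDocFromServer } from "firebase/firestore";
import { db } from "./firebase-config";

async function getUserCachedFirst(userId) {
  const ref = doc(db, "users", userId);
  try {
    // Served from the local cache: no billed document read
    const cached = await getDocFromCache(ref);
    return cached.data();
  } catch {
    // Cache miss: fall back to a normal, billed server read
    const fresh = await getDocFromServer(ref);
    return fresh.exists() ? fresh.data() : null;
  }
}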