name: "saas-scaffolder"
description: "Generates complete, production-ready SaaS project boilerplate including authentication, database schemas, billing integration, API routes, and a working dashboard using Next.js 14+ App Router, TypeScript, Tailwind CSS, shadcn/ui, Drizzle ORM, and Stripe. Use when the user wants to create a new SaaS app, start a subscription-based web project, scaffold a Next.js application, or mentions terms like starter template, boilerplate, new project, or wiring up auth and payments."
SaaS Scaffolder
Tier: POWERFUL
Category: Product Team
Domain: Full-Stack Development / Project Bootstrapping
Product: [name]
Description: [1-3 sentences]
Auth: nextauth | clerk | supabase
Database: neondb | supabase | planetscale
Payments: stripe | lemonsqueezy | none
Features: [comma-separated list]
File Tree Output
my-saas/
├── app/
│ ├── (auth)/
│ │ ├── login/page.tsx
│ │ ├── register/page.tsx
│ │ └── layout.tsx
│ ├── (dashboard)/
│ │ ├── dashboard/page.tsx
│ │ ├── settings/page.tsx
│ │ ├── billing/page.tsx
│ │ └── layout.tsx
│ ├── (marketing)/
│ │ ├── page.tsx
│ │ ├── pricing/page.tsx
│ │ └── layout.tsx
│ ├── api/
│ │ ├── auth/[...nextauth]/route.ts
│ │ ├── webhooks/stripe/route.ts
│ │ ├── billing/checkout/route.ts
│ │ └── billing/portal/route.ts
│ └── layout.tsx
├── components/
│ ├── ui/
│ ├── auth/
│ │ ├── login-form.tsx
│ │ └── register-form.tsx
│ ├── dashboard/
│ │ ├── sidebar.tsx
│ │ ├── header.tsx
│ │ └── stats-card.tsx
│ ├── marketing/
│ │ ├── hero.tsx
│ │ ├── features.tsx
│ │ ├── pricing.tsx
│ │ └── footer.tsx
│ └── billing/
│ ├── plan-card.tsx
│ └── usage-meter.tsx
├── lib/
│ ├── auth.ts
│ ├── db.ts
│ ├── stripe.ts
│ ├── validations.ts
│ └── utils.ts
├── db/
│ ├── schema.ts
│ └── migrations/
├── hooks/
│ ├── use-subscription.ts
│ └── use-user.ts
├── types/index.ts
├── middleware.ts
├── .env.example
├── drizzle.config.ts
└── next.config.ts
Key Component Patterns
Auth Config (NextAuth)
// lib/auth.ts
import { NextAuthOptions } from "next-auth"
import GoogleProvider from "next-auth/providers/google"
import { DrizzleAdapter } from "@auth/drizzle-adapter"
import { db } from "./db"

/**
 * Central NextAuth configuration: Drizzle-backed persistence, Google OAuth,
 * and a session callback that copies DB-backed fields onto the session so
 * server components can gate features without an extra query.
 */
export const authOptions: NextAuthOptions = {
  adapter: DrizzleAdapter(db),
  providers: [
    GoogleProvider({
      clientId: process.env.GOOGLE_CLIENT_ID!,
      clientSecret: process.env.GOOGLE_CLIENT_SECRET!,
    }),
  ],
  callbacks: {
    async session({ session, user }) {
      // NOTE(review): assumes the users table exposes a subscriptionStatus
      // field — confirm the Drizzle schema defines it.
      return {
        ...session,
        user: {
          ...session.user,
          id: user.id,
          subscriptionStatus: user.subscriptionStatus,
        },
      }
    },
  },
  pages: { signIn: "/login" },
}
Database Schema (Drizzle + NeonDB)
// db/schema.ts
import { pgTable, text, timestamp, integer, primaryKey } from "drizzle-orm/pg-core"

/** Application users, plus the Stripe billing fields synced by webhooks. */
export const users = pgTable("users", {
  id: text("id").primaryKey().$defaultFn(() => crypto.randomUUID()),
  name: text("name"),
  email: text("email").notNull().unique(),
  emailVerified: timestamp("emailVerified"),
  image: text("image"),
  stripeCustomerId: text("stripe_customer_id").unique(),
  stripeSubscriptionId: text("stripe_subscription_id"),
  stripePriceId: text("stripe_price_id"),
  stripeCurrentPeriodEnd: timestamp("stripe_current_period_end"),
  createdAt: timestamp("created_at").defaultNow().notNull(),
})

/**
 * OAuth accounts linked to users. The NextAuth Drizzle adapter requires a
 * composite primary key on (provider, providerAccountId) and the full set of
 * OAuth token columns — without them, sign-in writes fail at runtime.
 */
export const accounts = pgTable(
  "accounts",
  {
    userId: text("user_id").notNull().references(() => users.id, { onDelete: "cascade" }),
    type: text("type").notNull(),
    provider: text("provider").notNull(),
    providerAccountId: text("provider_account_id").notNull(),
    refresh_token: text("refresh_token"),
    access_token: text("access_token"),
    expires_at: integer("expires_at"),
    token_type: text("token_type"),
    scope: text("scope"),
    id_token: text("id_token"),
    session_state: text("session_state"),
  },
  (account) => ({
    pk: primaryKey({ columns: [account.provider, account.providerAccountId] }),
  })
)
Stripe Checkout Route
// app/api/billing/checkout/route.ts
import { NextResponse } from "next/server"
import { getServerSession } from "next-auth"
import { authOptions } from "@/lib/auth"
import { stripe } from "@/lib/stripe"
import { db } from "@/lib/db"
import { users } from "@/db/schema"
import { eq } from "drizzle-orm"

/**
 * Creates a Stripe Checkout session for a subscription purchase.
 * Requires an authenticated session; lazily creates the Stripe customer
 * on first checkout and persists its id on the user row.
 */
export async function POST(req: Request) {
  const session = await getServerSession(authOptions)
  if (!session?.user) return NextResponse.json({ error: "Unauthorized" }, { status: 401 })

  // The request body is untrusted client input: guard against malformed
  // JSON and validate priceId before handing it to Stripe.
  let priceId: unknown
  try {
    ;({ priceId } = await req.json())
  } catch {
    return NextResponse.json({ error: "Invalid JSON body" }, { status: 400 })
  }
  if (typeof priceId !== "string" || !priceId.startsWith("price_")) {
    return NextResponse.json({ error: "Invalid priceId" }, { status: 400 })
  }

  const [user] = await db.select().from(users).where(eq(users.id, session.user.id))
  // Guard: a stale session could reference a deleted user row; the original
  // code would throw on user.stripeCustomerId here.
  if (!user) return NextResponse.json({ error: "User not found" }, { status: 404 })

  // Reuse the stored Stripe customer, or create one on first checkout.
  let customerId = user.stripeCustomerId
  if (!customerId) {
    const customer = await stripe.customers.create({ email: session.user.email! })
    customerId = customer.id
    await db.update(users).set({ stripeCustomerId: customerId }).where(eq(users.id, user.id))
  }

  const checkoutSession = await stripe.checkout.sessions.create({
    customer: customerId,
    mode: "subscription",
    payment_method_types: ["card"],
    line_items: [{ price: priceId, quantity: 1 }],
    success_url: `${process.env.NEXT_PUBLIC_APP_URL}/dashboard?upgraded=true`,
    cancel_url: `${process.env.NEXT_PUBLIC_APP_URL}/pricing`,
    subscription_data: { trial_period_days: 14 },
  })
  return NextResponse.json({ url: checkoutSession.url })
}
Middleware
// middleware.ts
import { withAuth } from "next-auth/middleware"
import { NextResponse } from "next/server"

/**
 * Protects authenticated areas. The `authorized` callback gates every
 * matched route on the presence of a session token; the wrapped middleware
 * additionally issues an explicit /login redirect for /dashboard paths.
 */
export default withAuth(
  function middleware(req) {
    const { token } = req.nextauth
    const wantsDashboard = req.nextUrl.pathname.startsWith("/dashboard")
    if (wantsDashboard && !token) {
      return NextResponse.redirect(new URL("/login", req.url))
    }
  },
  { callbacks: { authorized: ({ token }) => !!token } }
)

// Only these route trees run through the auth middleware.
export const config = {
  matcher: ["/dashboard/:path*", "/settings/:path*", "/billing/:path*"],
}
Environment Variables Template
# .env.example
NEXT_PUBLIC_APP_URL=http://localhost:3000
DATABASE_URL=postgresql://user:password@ep-example-123456.us-east-2.aws.neon.tech/neondb?sslmode=require
NEXTAUTH_SECRET=generate-with-openssl-rand-base64-32
NEXTAUTH_URL=http://localhost:3000
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
STRIPE_SECRET_KEY=sk_test_...
STRIPE_WEBHOOK_SECRET=whsec_...
NEXT_PUBLIC_STRIPE_PUBLISHABLE_KEY=pk_test_...
STRIPE_PRO_PRICE_ID=price_...
Scaffold Checklist
The following phases must be completed in order. Validate at the end of each phase before proceeding.
Phase 1 — Foundation
✅ Validate: Run npm run build — no TypeScript or lint errors should appear.
🔧 If build fails: Check tsconfig.json paths and that all shadcn/ui peer dependencies are installed.
Phase 2 — Database
✅ Validate: Run a simple db.select().from(users) in a test script — it should return an empty array without throwing.
🔧 If DB connection fails: Verify DATABASE_URL format includes ?sslmode=require for NeonDB/Supabase. Check that the migration has been applied with drizzle-kit push (dev) or drizzle-kit migrate (prod).
Phase 3 — Authentication
✅ Validate: Sign in via OAuth, confirm session user has id and subscriptionStatus. Attempt to access /dashboard without a session — you should be redirected to /login.
🔧 If sign-out loops occur in production: Ensure NEXTAUTH_SECRET is set and consistent across deployments. Add declare module "next-auth" to extend session types if TypeScript errors appear.
Phase 4 — Payments
✅ Validate: Complete a Stripe test checkout using a 4242 4242 4242 4242 card. Confirm stripeSubscriptionId is written to the DB. Replay the checkout.session.completed webhook event and confirm idempotency (no duplicate DB writes).
🔧 If webhook signature fails: Use stripe listen --forward-to localhost:3000/api/webhooks/stripe locally — never hardcode the raw webhook secret. Verify STRIPE_WEBHOOK_SECRET matches the listener output.
Phase 5 — UI
✅ Validate: Run npm run build for a final production build check. Navigate all routes manually and confirm no broken layouts, missing session data, or hydration errors.
Reference Files
For additional guidance, generate the following companion reference files alongside the scaffold:
CUSTOMIZATION.md — Auth providers, database options, ORM alternatives, payment providers, UI themes, and billing models (per-seat, flat-rate, usage-based).
PITFALLS.md — Common failure modes: missing NEXTAUTH_SECRET, webhook secret mismatches, Edge runtime conflicts with Drizzle, unextended session types, and migration strategy differences between dev and prod.
BEST_PRACTICES.md — Stripe singleton pattern, server actions for form mutations, idempotent webhook handlers, Suspense boundaries for async dashboard data, server-side feature gating via stripeCurrentPeriodEnd, and rate limiting on auth routes with Upstash Redis + @upstash/ratelimit.
SaaS Architecture Patterns
Overview
This reference covers the key architectural decisions when building SaaS applications. Each pattern includes trade-offs and decision criteria to help teams make informed choices early in the development process.
Multi-Tenancy Models
1. Shared Database (Shared Schema)
All tenants share the same database and tables, distinguished by a tenant_id column.
Pros:
- Lowest infrastructure cost
- Simplest deployment and maintenance
- Easy cross-tenant analytics
- Fastest time to market
Cons:
- Risk of data leakage between tenants
- Noisy neighbor performance issues
- Complex data isolation enforcement
- Harder to meet data residency requirements
Best for: Early-stage products, SMB customers, cost-sensitive deployments
2. Schema-Per-Tenant
Each tenant gets their own database schema within a shared database instance.
Pros:
- Better data isolation than shared schema
- Easier per-tenant backup and restore
- Moderate infrastructure efficiency
- Can customize schema per tenant if needed
Cons:
- Schema migration complexity at scale (N migrations per update)
- Connection pooling challenges
- Database instance limits on schema count
- Moderate operational complexity
Best for: Mid-market products, moderate tenant count (100-1,000)
3. Database-Per-Tenant
Each tenant gets a completely separate database instance.
Pros:
- Maximum data isolation and security
- Per-tenant performance tuning
- Easy data residency compliance
- Simple per-tenant backup/restore
- No noisy neighbor issues
Cons:
- Highest infrastructure cost
- Complex deployment automation required
- Cross-tenant queries/analytics challenging
- Connection management overhead
Best for: Enterprise products, regulated industries (healthcare, finance), high-value customers
Decision Matrix
| Factor          | Shared DB | Schema-Per-Tenant | DB-Per-Tenant |
| --------------- | --------- | ----------------- | ------------- |
| Cost            | Low       | Medium            | High          |
| Isolation       | Low       | Medium            | High          |
| Scale (tenants) | 10,000+   | 100-1,000         | 10-100        |
| Compliance      | Basic     | Moderate          | Full          |
| Complexity      | Low       | Medium            | High          |
| Performance     | Shared    | Moderate          | Dedicated     |
API-First Design
Principles
- API before UI - Design the API contract before building any frontend
- Versioning from day one - Use URL versioning (`/v1/`) or header-based
- Consistent conventions - RESTful resources, standard HTTP methods, consistent error format
- Documentation as code - OpenAPI/Swagger specification maintained alongside code
REST API Standards
- Use nouns for resources (`/users`, `/projects`)
- Use HTTP methods semantically (GET=read, POST=create, PUT=update, DELETE=remove)
- Return appropriate status codes (200, 201, 400, 401, 403, 404, 429, 500)
- Implement pagination (cursor-based for large datasets, offset for small)
- Support filtering, sorting, and field selection
- Rate limiting with clear headers (X-RateLimit-Limit, X-RateLimit-Remaining)
API Design Checklist
Event-Driven Architecture
When to Use
- Decoupling services that evolve independently
- Handling asynchronous workflows (notifications, integrations)
- Building audit trails and activity feeds
- Enabling real-time features (live updates, collaboration)
Event Patterns
- Event Notification: Lightweight event triggers consumer to fetch data
- Event-Carried State Transfer: Event contains all needed data
- Event Sourcing: Store state as sequence of events, derive current state
Implementation Options
- Message Queues: RabbitMQ, Amazon SQS (point-to-point)
- Event Streams: Apache Kafka, Amazon Kinesis (pub/sub, replay)
- Managed PubSub: Google Pub/Sub, AWS EventBridge
- In-App: Redis Streams for lightweight event handling
CQRS (Command Query Responsibility Segregation)
Pattern
- Separate read models (optimized for queries) from write models (optimized for commands)
- Write side handles business logic and validation
- Read side provides denormalized views for fast retrieval
When to Use
- Read/write ratio is heavily skewed (90%+ reads)
- Complex domain logic on write side
- Different scaling needs for reads vs writes
- Multiple read representations of same data needed
When to Avoid
- Simple CRUD applications
- Small-scale applications where complexity is not justified
- Teams without event-driven architecture experience
Microservices vs Monolith Decision Matrix
| Factor               | Monolith               | Microservices                  |
| -------------------- | ---------------------- | ------------------------------ |
| Team size            | < 10 engineers         | > 10 engineers                 |
| Product maturity     | Early stage, exploring | Established, scaling           |
| Deployment frequency | Weekly-monthly         | Daily per service              |
| Domain complexity    | Single bounded context | Multiple bounded contexts      |
| Scaling needs        | Uniform                | Service-specific               |
| Operational maturity | Low (no DevOps team)   | High (platform team)           |
| Time to market       | Faster initially       | Slower initially, faster later |
Recommended Path
- Start monolith - Get to product-market fit fast
- Modular monolith - Organize code into bounded contexts
- Extract services - Move high-change or high-scale modules to services
- Full microservices - Only when team and infrastructure justify it
Serverless Considerations
Good Fit
- Infrequent or bursty workloads
- Event-driven processing (webhooks, file processing, notifications)
- API endpoints with variable traffic
- Scheduled jobs and background tasks
Poor Fit
- Long-running processes (>15 min)
- WebSocket connections
- Latency-sensitive operations (cold start impact)
- Heavy compute workloads
Serverless Patterns for SaaS
- API Gateway + Lambda: HTTP request handling
- Event processing: S3/SQS triggers for async work
- Scheduled tasks: CloudWatch Events for cron jobs
- Edge computing: CloudFront Functions for personalization
Infrastructure Recommendations by Stage
| Stage      | Users    | Architecture       | Database                  | Hosting              |
| ---------- | -------- | ------------------ | ------------------------- | -------------------- |
| MVP        | 0-100    | Monolith           | Shared PostgreSQL         | Single server / PaaS |
| Growth     | 100-10K  | Modular monolith   | Managed DB, read replicas | Auto-scaling group   |
| Scale      | 10K-100K | Service extraction | DB per service, caching   | Kubernetes / ECS     |
| Enterprise | 100K+    | Microservices      | Polyglot persistence      | Multi-region, CDN    |
Authentication & Billing Implementation Guide
Overview
Authentication and billing are foundational SaaS capabilities that affect every user interaction. This guide covers implementation patterns, security best practices, and common pitfalls for both systems.
Authentication
OAuth2 / OpenID Connect (OIDC) Flows
Authorization Code Flow (Recommended for Web Apps)
- Redirect user to authorization server (`/authorize`)
- User authenticates and consents
- Authorization server redirects back with authorization code
- Backend exchanges code for tokens (`/token`)
- Store tokens server-side, issue session cookie
Use when: Server-rendered apps, traditional web applications
Authorization Code Flow + PKCE (Recommended for SPAs and Mobile)
- Generate code verifier and code challenge
- Redirect with code challenge
- User authenticates
- Exchange code + code verifier for tokens
- Store tokens securely (memory for SPAs, secure storage for mobile)
Use when: Single-page applications, mobile apps, any public client
Client Credentials Flow
- Service authenticates with client_id and client_secret
- Receives access token for service-to-service calls
Use when: Backend service-to-service communication, no user context
JWT Best Practices
Token Structure:
- Access token: Short-lived (15-60 minutes), contains user claims
- Refresh token: Longer-lived (7-30 days), stored securely, used to get new access tokens
- ID token: Contains user identity claims, used by frontend only
Security Guidelines:
- Sign tokens with RS256 (asymmetric) for distributed systems
- Include `iss`, `aud`, `exp`, `iat`, `sub` standard claims
- Never store sensitive data in JWT payload (it is base64-encoded, not encrypted)
- Validate all claims on every request
- Implement token rotation for refresh tokens
- Maintain a deny-list for revoked tokens (or use short-lived access tokens)
- Set `httpOnly`, `secure`, `sameSite=strict` for cookie-stored tokens
Common Pitfalls:
- Using HS256 in distributed systems (shared secret)
- Storing JWTs in localStorage (XSS vulnerable)
- Not validating `aud` claim (token reuse attacks)
- Excessively long access token lifetimes
RBAC vs ABAC
Role-Based Access Control (RBAC)
- Assign users to roles (Admin, Editor, Viewer)
- Roles have fixed permission sets
- Simple to implement and understand
- Works well for most SaaS applications
Implementation:
User -> Role -> Permissions
alice@example.com -> Admin -> [create, read, update, delete, manage_users]
bob@example.com -> Editor -> [create, read, update]
carol@example.com -> Viewer -> [read]
Attribute-Based Access Control (ABAC)
- Decisions based on user attributes, resource attributes, environment
- More flexible but more complex
- Required for fine-grained access control
Use ABAC when:
- Access depends on resource ownership (users can edit their own posts)
- Multi-tenant isolation requires tenant-context checks
- Time-based or location-based access rules needed
- Regulatory compliance requires granular audit trails
Social Login Implementation
- Support Google, GitHub, Microsoft at minimum for B2B
- Map social identity to internal user record (by email)
- Handle account linking (same email, different providers)
- Always allow email/password as fallback
- Implement account deduplication strategy
Billing & Subscriptions
Stripe Integration Patterns
Setup Flow
- Create Stripe Customer on user registration
- Store `stripe_customer_id` in your database
- Use Stripe Checkout for initial payment (PCI-compliant)
- Store `subscription_id` for ongoing management
- Sync plan status via webhooks (source of truth)
Key Stripe Objects
- Customer: Maps to your user/organization
- Product: Maps to your plan tier (Basic, Pro, Enterprise)
- Price: Specific pricing for a product (monthly, annual)
- Subscription: Active billing relationship
- Invoice: Generated per billing cycle
- PaymentIntent: Represents a payment attempt
Subscription Lifecycle
Trial Period
- Offer 7-14 day free trial (no credit card for PLG, card required for sales-led)
- Send reminder emails at 3 days and 1 day before trial ends
- Provide clear upgrade path within the product
- Track trial engagement to predict conversion
Active Subscription
- Sync plan features with entitlement system
- Handle plan upgrades (immediate proration) and downgrades (end of period)
- Support annual billing with discount (typically 15-20%)
- Send receipts and invoice notifications
Payment Failure / Dunning
- First failure: Retry automatically, notify user
- Second failure (3 days later): Retry, send warning email
- Third failure (7 days later): Retry, restrict features
- Final attempt (14 days): Cancel subscription, move to free tier
- Win-back: Send recovery emails at 30, 60, 90 days
Churned
- Downgrade to free tier (maintain data for re-activation)
- Track churn reason (survey on cancellation)
- Implement cancellation flow with save offers
- Define data retention policy (90 days typical)
Reactivated
- Allow easy re-subscription from settings
- Restore previous plan and data
- Consider win-back offers (discount for first month back)
Webhook Handling
Critical Webhooks to Handle:
customer.subscription.created - Activate plan
customer.subscription.updated - Sync plan changes
customer.subscription.deleted - Handle cancellation
invoice.paid - Confirm payment, update status
invoice.payment_failed - Trigger dunning flow
checkout.session.completed - Complete signup flow
Webhook Best Practices:
- Verify webhook signature on every request
- Respond with 200 immediately, process asynchronously
- Implement idempotency (handle duplicate events)
- Log all webhook events for debugging
- Set up webhook failure alerts
- Use Stripe CLI for local development testing
PCI Compliance Basics
SAQ-A (Recommended for SaaS)
- Use Stripe.js / Stripe Elements for card collection
- Never touch raw card numbers on your servers
- Card data goes directly from browser to Stripe
- Your servers only handle tokens and customer IDs
Requirements
Entitlement System Design
Feature Gating Pattern
Check flow:
1. User action requested
2. Look up user's subscription plan
3. Check plan's feature flags / limits
4. Allow or deny with appropriate message
Entitlement Types
- Boolean: Feature on/off (e.g., "SSO enabled")
- Numeric limit: Usage cap (e.g., "10 projects max")
- Tiered: Different capability levels (e.g., "basic/advanced analytics")
Implementation Tips
- Cache entitlements locally (refresh on plan change webhook)
- Show upgrade prompts at limit boundaries (not hard blocks)
- Provide grace periods for brief overages
- Track usage for plan recommendation engine
Technology Stack Comparison
Overview
Choosing the right technology stack is one of the most impactful early decisions for a SaaS product. This comparison covers the most popular options across frontend, backend, database, and caching layers, with decision criteria for each.
Frontend Frameworks
Next.js (React)
Strengths:
- Largest ecosystem and community
- Excellent developer tooling and documentation
- Server-side rendering (SSR) and static generation (SSG) built in
- Vercel deployment makes hosting trivial
- App Router with React Server Components for optimal performance
- Rich component library ecosystem (shadcn/ui, Radix, Chakra)
Weaknesses:
- React learning curve (hooks, state management, rendering model)
- Bundle size can grow without discipline
- Vercel lock-in concerns for advanced features
- Frequent major version changes
Best for: Most SaaS products, teams with React experience, SEO-important pages
Remix (React)
Strengths:
- Web standards focused (forms, HTTP, progressive enhancement)
- Excellent data loading patterns (loaders/actions)
- Built-in error boundaries and optimistic UI
- Works without JavaScript enabled
- Strong TypeScript support
- Deployable anywhere (not tied to specific platform)
Weaknesses:
- Smaller ecosystem than Next.js
- Fewer deployment guides and hosting templates
- Less community content and tutorials
- Now merged into React Router v7 (transition period)
Best for: Data-heavy applications, teams valuing web standards, progressive enhancement needs
SvelteKit (Svelte)
Strengths:
- Smallest bundle sizes (compiler-based, no virtual DOM)
- Simplest learning curve among frameworks
- Built-in state management (reactive declarations)
- Excellent performance out of the box
- Growing ecosystem and community
- First-class TypeScript support
Weaknesses:
- Smaller ecosystem and component library selection
- Fewer developers in hiring pool
- Less enterprise adoption (harder to find case studies)
- Fewer third-party integrations
Best for: Performance-critical applications, small teams wanting simplicity, developer experience priority
Frontend Decision Criteria
| Criterion              | Next.js   | Remix     | SvelteKit |
| ---------------------- | --------- | --------- | --------- |
| Ecosystem Size         | Large     | Medium    | Growing   |
| Learning Curve         | Medium    | Medium    | Low       |
| Performance            | Good      | Good      | Excellent |
| SSR/SSG                | Excellent | Good      | Good      |
| Hiring Pool            | Large     | Small     | Small     |
| Bundle Size            | Medium    | Small     | Smallest  |
| TypeScript             | Excellent | Excellent | Excellent |
| Deployment Flexibility | Medium    | High      | High      |
Backend Frameworks
Node.js (Express / Fastify / NestJS)
Strengths:
- Same language as frontend (JavaScript/TypeScript full-stack)
- Massive npm ecosystem
- NestJS provides enterprise patterns (DI, modules, decorators)
- Excellent for I/O-heavy workloads
- Large community and hiring pool
- Great for real-time features (WebSockets)
Weaknesses:
- Single-threaded (CPU-intensive tasks require workers)
- Callback/async complexity
- npm dependency security concerns
- Less suited for computational workloads
Best for: Full-stack TypeScript teams, real-time applications, API-heavy products
Python (FastAPI / Django)
Strengths:
- FastAPI: Modern, fast, automatic OpenAPI docs, async support
- Django: Batteries included (admin, ORM, auth, migrations)
- Excellent for data processing and ML integration
- Clean, readable syntax
- Strong ecosystem for analytics and data work
- Large hiring pool across web and data roles
Weaknesses:
- Slower runtime than Go/Rust (mitigated by async in FastAPI)
- GIL limits true parallelism (multiprocessing required)
- Django can feel heavyweight for microservices
- Deployment can be more complex (WSGI/ASGI setup)
Best for: Data-heavy products, ML integration, rapid prototyping, admin-heavy applications
Go (Gin / Echo / Fiber)
Strengths:
- Excellent performance (compiled, concurrent by design)
- Low memory footprint
- Simple deployment (single binary, no runtime)
- Built-in concurrency (goroutines, channels)
- Strong standard library
- Fast compilation
Weaknesses:
- Smaller web ecosystem than Node.js or Python
- More verbose for CRUD operations
- Error handling verbosity
- Fewer ORM options (GORM is the main choice)
- Steeper learning curve for teams from dynamic languages
Best for: High-throughput APIs, microservices, infrastructure tooling, performance-critical backends
Backend Decision Criteria
| Criterion              | Node.js    | Python        | Go         |
| ---------------------- | ---------- | ------------- | ---------- |
| Performance            | Good       | Moderate      | Excellent  |
| Developer Productivity | High       | High          | Medium     |
| Ecosystem              | Largest    | Large         | Medium     |
| Hiring Pool            | Large      | Large         | Medium     |
| Full-Stack Synergy     | Excellent  | None          | None       |
| Data/ML Integration    | Medium     | Excellent     | Low        |
| Concurrency            | Event Loop | Async/Threads | Goroutines |
| Deployment Simplicity  | Medium     | Medium        | High       |
Database
PostgreSQL
Strengths:
- ACID compliant with excellent reliability
- Rich feature set (JSON, full-text search, GIS, arrays)
- Extensible (custom types, functions, extensions like PostGIS, pgvector)
- Strong community and tooling
- Excellent for complex queries and analytics
- Free and open source with managed options (AWS RDS, Supabase, Neon)
Weaknesses:
- Horizontal scaling requires effort (Citus, partitioning)
- More complex initial setup than MySQL
- VACUUM maintenance at high write volumes
- Slightly slower for simple read-heavy workloads vs MySQL
Best for: Most SaaS applications (recommended default), complex data models, JSON workloads
MySQL
Strengths:
- Proven at massive scale (Meta, Uber, Shopify)
- Simpler replication setup
- Faster for simple read-heavy workloads
- PlanetScale offers serverless MySQL with branching
- Wide hosting support
Weaknesses:
- Fewer advanced features than PostgreSQL
- Weaker JSON support
- Less extensible
- InnoDB limitations for certain workloads
Best for: Read-heavy applications, teams with MySQL expertise, PlanetScale users
Database Decision Criteria
| Criterion        | PostgreSQL | MySQL              |
| ---------------- | ---------- | ------------------ |
| Feature Richness | Excellent  | Good               |
| JSON Support     | Excellent  | Moderate           |
| Replication      | Good       | Good               |
| Horizontal Scale | Moderate   | Good (PlanetScale) |
| Community        | Excellent  | Excellent          |
| Managed Options  | Many       | Many               |
| Learning Curve   | Medium     | Low                |
| Default Choice   | Yes        | Situational        |
Caching Layer
Redis
Strengths:
- Rich data structures (strings, hashes, lists, sets, sorted sets, streams)
- Pub/Sub for real-time messaging
- Lua scripting for atomic operations
- Persistence options (RDB, AOF)
- Cluster mode for horizontal scaling
- Used for caching, sessions, queues, rate limiting, leaderboards
Weaknesses:
- Memory-bound (dataset must fit in RAM)
- Single-threaded command processing
- Licensing changes (Redis 7.4+ source-available)
- Cluster mode adds complexity
Best for: Most SaaS applications (recommended default), session management, rate limiting, queues
Memcached
Strengths:
- Simplest possible key-value cache
- Multi-threaded (better CPU utilization for simple operations)
- Lower memory overhead per key
- Predictable performance characteristics
- Battle-tested at scale
Weaknesses:
- No data structures (strings only)
- No persistence
- No pub/sub or scripting
- No built-in clustering (client-side sharding)
- Limited eviction policies
Best for: Pure caching use cases, simple key-value lookups, memory efficiency priority
Cache Decision Criteria
| Criterion         | Redis                  | Memcached    |
| ----------------- | ---------------------- | ------------ |
| Data Structures   | Rich                   | Strings Only |
| Persistence       | Yes                    | No           |
| Pub/Sub           | Yes                    | No           |
| Multi-Threading   | No (I/O threads in v6) | Yes          |
| Use Cases         | Many                   | Caching Only |
| Memory Efficiency | Good                   | Better       |
| Default Choice    | Yes                    | Rarely       |
Recommended Stacks by Product Type
B2B SaaS (Most Common)
- Frontend: Next.js + TypeScript + shadcn/ui
- Backend: Node.js (NestJS) or Python (FastAPI)
- Database: PostgreSQL
- Cache: Redis
- Auth: Auth0 or Clerk
- Payments: Stripe
Developer Tool / API Product
- Frontend: Next.js or SvelteKit
- Backend: Go (Gin) or Node.js (Fastify)
- Database: PostgreSQL
- Cache: Redis
- Auth: Custom JWT + API Keys
- Docs: Mintlify or ReadMe
Data-Heavy / Analytics Product
- Frontend: Next.js
- Backend: Python (FastAPI)
- Database: PostgreSQL + ClickHouse (analytics)
- Cache: Redis
- Processing: Celery or Temporal
- Visualization: Custom or embedded (Metabase)
Real-Time / Collaboration Product
- Frontend: Next.js or SvelteKit
- Backend: Node.js (Fastify) + WebSockets
- Database: PostgreSQL + Redis (pub/sub)
- Cache: Redis
- Real-Time: Socket.io or Liveblocks
- CRDT: Yjs or Automerge (for collaborative editing)
#!/usr/bin/env python3
"""Project Bootstrapper — Generate SaaS project scaffolding from config.
Creates project directory structure with boilerplate files, README,
docker-compose, environment configs, and CI/CD templates.
Usage:
python project_bootstrapper.py config.json --output-dir ./my-project
python project_bootstrapper.py config.json --format json --dry-run
"""
import argparse
import json
import os
import sys
from typing import Dict, List, Any, Optional
from datetime import datetime
# Per-stack scaffolding templates. Each stack id maps to:
#   - one or more top-level config files rendered lazily via ``lambda c``,
#     where ``c`` is the user-supplied project config dict (only
#     ``c["name"]`` is read — NOTE(review): a missing "name" key raises
#     KeyError; confirm callers validate the config first),
#   - "dirs": directories to create inside the project root,
#   - "files": relative path -> literal boilerplate file contents.
STACK_TEMPLATES = {
    "nextjs": {
        # package.json for a Next.js 14 app with TypeScript, ESLint and Jest.
        "package.json": lambda c: json.dumps({
            "name": c["name"],
            "version": "0.1.0",
            "private": True,
            "scripts": {
                "dev": "next dev",
                "build": "next build",
                "start": "next start",
                "lint": "next lint",
                "test": "jest",
                "test:watch": "jest --watch"
            },
            "dependencies": {
                "next": "^14.0.0",
                "react": "^18.0.0",
                "react-dom": "^18.0.0"
            },
            "devDependencies": {
                "typescript": "^5.0.0",
                "@types/react": "^18.0.0",
                "@types/node": "^20.0.0",
                "eslint": "^8.0.0",
                "eslint-config-next": "^14.0.0"
            }
        }, indent=2),
        # Strict TypeScript config with the "@/*" path alias under src/.
        "tsconfig.json": lambda c: json.dumps({
            "compilerOptions": {
                "target": "es5",
                "lib": ["dom", "dom.iterable", "esnext"],
                "allowJs": True,
                "skipLibCheck": True,
                "strict": True,
                "forceConsistentCasingInFileNames": True,
                "noEmit": True,
                "esModuleInterop": True,
                "module": "esnext",
                "moduleResolution": "bundler",
                "resolveJsonModule": True,
                "isolatedModules": True,
                "jsx": "preserve",
                "incremental": True,
                "paths": {"@/*": ["./src/*"]}
            },
            "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"],
            "exclude": ["node_modules"]
        }, indent=2),
        "dirs": ["src/app", "src/components", "src/lib", "src/styles", "public", "tests"],
        # Minimal App Router entry points (root layout + home page).
        "files": {
            "src/app/layout.tsx": "export default function RootLayout({ children }: { children: React.ReactNode }) {\n return <html lang=\"en\"><body>{children}</body></html>;\n}\n",
            "src/app/page.tsx": "export default function Home() {\n return <main><h1>Welcome</h1></main>;\n}\n",
        }
    },
    "express": {
        # package.json for an Express + TypeScript API; tsx drives dev reload.
        "package.json": lambda c: json.dumps({
            "name": c["name"],
            "version": "0.1.0",
            "main": "src/index.ts",
            "scripts": {
                "dev": "tsx watch src/index.ts",
                "build": "tsc",
                "start": "node dist/index.js",
                "test": "jest",
                "lint": "eslint src/"
            },
            "dependencies": {
                "express": "^4.18.0",
                "cors": "^2.8.5",
                "helmet": "^7.0.0",
                "dotenv": "^16.0.0"
            },
            "devDependencies": {
                "typescript": "^5.0.0",
                "@types/express": "^4.17.0",
                "@types/cors": "^2.8.0",
                "@types/node": "^20.0.0",
                "tsx": "^4.0.0",
                "jest": "^29.0.0",
                "@types/jest": "^29.0.0",
                "eslint": "^8.0.0"
            }
        }, indent=2),
        "dirs": ["src/routes", "src/middleware", "src/models", "src/services", "src/utils", "tests"],
        # Entry point: helmet + CORS + JSON middleware and a /health probe.
        "files": {
            "src/index.ts": "import express from 'express';\nimport cors from 'cors';\nimport helmet from 'helmet';\nimport { config } from 'dotenv';\n\nconfig();\n\nconst app = express();\nconst PORT = process.env.PORT || 3000;\n\napp.use(helmet());\napp.use(cors());\napp.use(express.json());\n\napp.get('/health', (req, res) => res.json({ status: 'ok' }));\n\napp.listen(PORT, () => console.log(`Server running on port ${PORT}`));\n",
        }
    },
    "fastapi": {
        # requirements.txt content is static; the lambda keeps the same
        # call signature as the other stacks' file generators.
        "requirements.txt": lambda c: "fastapi>=0.100.0\nuvicorn[standard]>=0.23.0\npydantic>=2.0.0\npython-dotenv>=1.0.0\nsqlalchemy>=2.0.0\nalembic>=1.12.0\npytest>=7.0.0\nhttpx>=0.24.0\n",
        "dirs": ["app/api", "app/models", "app/services", "app/core", "tests", "alembic"],
        # Minimal FastAPI app with pydantic-settings config and /health probe.
        "files": {
            "app/__init__.py": "",
            "app/main.py": "from fastapi import FastAPI\nfrom app.core.config import settings\n\napp = FastAPI(title=settings.PROJECT_NAME)\n\n@app.get('/health')\ndef health(): return {'status': 'ok'}\n",
            "app/core/__init__.py": "",
            "app/core/config.py": "from pydantic_settings import BaseSettings\n\nclass Settings(BaseSettings):\n PROJECT_NAME: str = 'API'\n DATABASE_URL: str = 'sqlite:///./app.db'\n class Config:\n env_file = '.env'\n\nsettings = Settings()\n",
        }
    }
}
def generate_readme(config: Dict[str, Any]) -> str:
    """Render the project's README.md as a Markdown string.

    Reads name/description/stack (plus database/auth) out of ``config``,
    substituting sensible defaults for any missing key.
    """
    name = config.get("name", "my-project")
    desc = config.get("description", "A SaaS application")
    stack = config.get("stack", "nextjs")
    # Node-based stacks share npm tooling; everything else is treated as Python.
    is_node = stack in ("nextjs", "express")
    dev_cmd = (
        "npm install && npm run dev"
        if is_node
        else "pip install -r requirements.txt && uvicorn app.main:app --reload"
    )
    test_cmd = "npm test" if is_node else "pytest"
    src_dir = "src/" if is_node else "app/"
    return f"""# {name}
{desc}
## Tech Stack
- **Framework**: {stack}
- **Database**: {config.get('database', 'PostgreSQL')}
- **Auth**: {config.get('auth', 'JWT')}
## Getting Started
### Prerequisites
- Node.js 18+ / Python 3.11+
- Docker & Docker Compose
### Development
```bash
# Clone the repo
git clone <repo-url>
cd {name}
# Copy environment variables
cp .env.example .env
# Start with Docker
docker compose up -d
# Or run locally
{dev_cmd}
```
### Testing
```bash
{test_cmd}
```
## Project Structure
```
{name}/
├── {src_dir}
├── tests/
├── docker-compose.yml
├── .env.example
└── README.md
```
## License
MIT
"""
def generate_env_example(config: Dict[str, Any]) -> str:
    """Build the .env.example contents for the configured project.

    Always emits the application and database headers; database, auth,
    email, and storage variable groups are appended conditionally based
    on ``config``.
    """
    out = [
        "# Application",
        f"APP_NAME={config.get('name', 'my-app')}",
        "NODE_ENV=development",
        "PORT=3000",
        "",
        "# Database",
    ]
    # Connection-string template per supported database engine.
    db_urls = {
        "postgresql": "DATABASE_URL=postgresql://user:password@localhost:5432/mydb",
        "mongodb": "MONGODB_URI=mongodb://localhost:27017/mydb",
        "mysql": "DATABASE_URL=mysql://user:password@localhost:3306/mydb",
    }
    db = config.get("database", "postgresql")
    if db in db_urls:
        out += [db_urls[db], ""]
    if config.get("auth"):
        out += ["# Auth", "JWT_SECRET=change-me-in-production", "JWT_EXPIRY=7d", ""]
    features = config.get("features", {})
    if features.get("email"):
        out += ["# Email", "SMTP_HOST=smtp.example.com", "SMTP_PORT=587", "SMTP_USER=", "SMTP_PASS=", ""]
    if features.get("storage"):
        out += ["# Storage", "S3_BUCKET=", "S3_REGION=us-east-1", "AWS_ACCESS_KEY_ID=", "AWS_SECRET_ACCESS_KEY=", ""]
    return "\n".join(out)
def generate_docker_compose(config: Dict[str, Any]) -> str:
    """Generate docker-compose.yml content for the configured project.

    Emits an ``app`` service, an optional ``db`` service (via
    :func:`generate_db_service`), an optional ``redis`` service (via
    :func:`generate_redis_service`), and the named volume matching the
    chosen database.

    Fix: the original built ``services``/``compose`` dicts (and read
    ``name``/``stack``) that were never used — only the manually formatted
    string below was returned. That dead code is removed; output is unchanged.
    """
    db = config.get("database", "postgresql")
    nl = "\n"
    # Only declare the dependency when a database service will exist.
    depends_on = f"    depends_on:{nl}      - db" if db else ""
    # Named volume entry must match the db service's mount.
    vol_line = (
        "  pgdata:" if db == "postgresql"
        else "  mongodata:" if db == "mongodb"
        else "  {}"
    )
    # Manual YAML-like output (avoid pyyaml dependency)
    return f"""version: '3.8'
services:
  app:
    build: .
    ports:
      - "3000:3000"
    env_file:
      - .env
    volumes:
      - .:/app
{depends_on}
{generate_db_service(db)}
{generate_redis_service(config)}
volumes:
{vol_line}
"""
def generate_db_service(db: str) -> str:
    """Return the compose service block for the chosen database.

    Unknown/empty ``db`` values yield an empty string (no service emitted).
    """
    templates = {
        "postgresql": """  db:
    image: postgres:16-alpine
    ports:
      - "5432:5432"
    environment:
      POSTGRES_USER: user
      POSTGRES_PASSWORD: password
      POSTGRES_DB: mydb
    volumes:
      - pgdata:/var/lib/postgresql/data
""",
        "mongodb": """  db:
    image: mongo:7
    ports:
      - "27017:27017"
    volumes:
      - mongodata:/data/db
""",
    }
    return templates.get(db, "")
def generate_redis_service(config: Dict[str, Any]) -> str:
    """Return the compose service block for Redis, or '' when the feature is off."""
    if not config.get("features", {}).get("redis"):
        return ""
    return (
        "  redis:\n"
        "    image: redis:7-alpine\n"
        "    ports:\n"
        '      - "6379:6379"\n'
    )
def generate_gitignore(stack: str) -> str:
    """Return .gitignore contents.

    ``stack`` is accepted for signature symmetry with the other generators,
    but the ignore list currently covers Node and Python projects alike.
    """
    entries = (
        "node_modules/", ".env", ".env.local", "dist/", "build/", ".next/",
        "*.log", ".DS_Store", "coverage/", "__pycache__/", "*.pyc",
        ".pytest_cache/", ".venv/",
    )
    return "\n".join(entries) + "\n"
def generate_dockerfile(config: Dict[str, Any]) -> str:
    """Return Dockerfile text: a Python image for fastapi, Node otherwise."""
    python_image = """FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY . .
EXPOSE 3000
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "3000"]
"""
    node_image = """FROM node:20-alpine
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build
EXPOSE 3000
CMD ["npm", "start"]
"""
    return python_image if config.get("stack", "nextjs") == "fastapi" else node_image
def scaffold_project(config: Dict[str, Any], output_dir: str, dry_run: bool = False) -> Dict[str, Any]:
    """Generate the project scaffolding under ``output_dir``.

    Args:
        config: Project configuration (``name``, ``stack``, ``database``,
            ``auth``, ``features``, ...).
        output_dir: Directory the project is written into (created on demand).
        dry_run: When True, nothing touches the filesystem; the returned
            manifest still lists everything that *would* be created.

    Returns:
        A summary dict: generation timestamp, project name, stack,
        output directory, per-entry manifest, and file/dir counts.

    Fix: files are now written with an explicit ``encoding="utf-8"`` —
    the bare ``open(..., "w")`` used the platform default codec (e.g.
    cp1252 on Windows), which can corrupt or fail on non-ASCII content.
    """
    stack = config.get("stack", "nextjs")
    # Unknown stacks fall back to the Next.js template rather than failing.
    template = STACK_TEMPLATES.get(stack, STACK_TEMPLATES["nextjs"])
    files_created = []
    # Create directories (recorded even in dry-run so the preview is complete).
    for d in template.get("dirs", []):
        if not dry_run:
            os.makedirs(os.path.join(output_dir, d), exist_ok=True)
        files_created.append({"path": d + "/", "type": "directory"})
    all_files = {}
    # Manifest files are stored as callables so they can embed config values.
    for key in ("package.json", "requirements.txt", "tsconfig.json"):
        if key in template:
            all_files[key] = template[key](config)
    # Stack-specific static files.
    all_files.update(template.get("files", {}))
    # Common files shared by every stack.
    all_files["README.md"] = generate_readme(config)
    all_files[".env.example"] = generate_env_example(config)
    all_files[".gitignore"] = generate_gitignore(stack)
    all_files["docker-compose.yml"] = generate_docker_compose(config)
    all_files["Dockerfile"] = generate_dockerfile(config)
    # Write files (skipped entirely on dry-run; sizes still reported).
    for filepath, content in all_files.items():
        if not dry_run:
            full_path = os.path.join(output_dir, filepath)
            os.makedirs(os.path.dirname(full_path), exist_ok=True)
            with open(full_path, "w", encoding="utf-8") as f:
                f.write(content)
        files_created.append({"path": filepath, "type": "file", "size": len(content)})
    return {
        "generated_at": datetime.now().isoformat(),
        "project_name": config.get("name", "my-project"),
        "stack": stack,
        "output_dir": output_dir,
        "files_created": files_created,
        "total_files": len([f for f in files_created if f["type"] == "file"]),
        "total_dirs": len([f for f in files_created if f["type"] == "directory"]),
        "dry_run": dry_run,
    }
def main():
    """CLI entry point: load a project config JSON, scaffold, and report."""
    parser = argparse.ArgumentParser(description="Bootstrap SaaS project from config")
    parser.add_argument("input", help="Path to project config JSON")
    parser.add_argument("--output-dir", type=str, default="./my-project", help="Output directory")
    parser.add_argument("--format", choices=["json", "text"], default="text", help="Output format")
    parser.add_argument("--dry-run", action="store_true", help="Preview without creating files")
    args = parser.parse_args()
    with open(args.input) as fh:
        project_config = json.load(fh)
    report = scaffold_project(project_config, args.output_dir, args.dry_run)
    if args.format == "json":
        print(json.dumps(report, indent=2))
        return
    # Human-readable summary followed by the full file listing.
    print(f"Project '{report['project_name']}' scaffolded at {report['output_dir']}")
    print(f"Stack: {report['stack']}")
    print(f"Created: {report['total_files']} files, {report['total_dirs']} directories")
    if report["dry_run"]:
        print("\n[DRY RUN] No files were created. Files that would be created:")
    print("\nFiles:")
    for entry in report["files_created"]:
        icon = "📁" if entry["type"] == "directory" else "📄"
        size_note = f" ({entry.get('size', 0)} bytes)" if entry.get("size") else ""
        print(f" {icon} {entry['path']}{size_note}")
# Run the CLI only when executed as a script (no side effects on import).
if __name__ == "__main__":
    main()