Overview
This guide walks you through provisioning a complete Docker containerization environment on AWS using Cloud-Init and Terraform. You'll deploy a multi-service application stack with Nginx, a Node.js application, Redis, PostgreSQL, and monitoring tools.
Multi-Service Stack
Complete application stack with database, cache, and monitoring
Security Hardened
Built-in security controls and best practices
Production Ready
Monitoring, logging, and scalability features
Prerequisites
Important Requirements
Ensure you have the following before starting the deployment process.
Infrastructure Requirements
- AWS account with appropriate permissions
- Terraform installed (v1.0+)
- AWS CLI configured
- SSH key pair for EC2 access
Knowledge Requirements
- Basic Docker concepts
- Linux system administration
- Basic networking concepts
- Understanding of web applications
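A quick sanity check before starting saves a failed apply later; this sketch assumes the standard terraform and aws binaries are on your PATH and that an SSH key pair is available (generation is shown in the Configure Variables step).
#!/usr/bin/env bash
# Confirm the prerequisite tooling and credentials are in place.
set -euo pipefail
terraform version               # expect v1.0 or newer
aws --version                   # AWS CLI installed
aws sts get-caller-identity     # credentials configured and valid
ls ~/.ssh/*.pub                 # at least one public key available for EC2 access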
Complete Docker Cloud-Init Configuration
Comprehensive Cloud-Init configuration for Docker containerization with multi-service stack
Multi-Service Stack
This configuration deploys Nginx, Node.js app, Redis, PostgreSQL, Prometheus, and Grafana with security hardening.
#cloud-config
# Docker Containerization Setup with Cloud-Init
# Author: RFS Security Research
# Version: 1.0
# System updates and package installation
package_update: true
package_upgrade: true
packages:
- apt-transport-https
- ca-certificates
- curl
- gnupg
- lsb-release
- software-properties-common
- jq
- htop
- vim
- git
- unzip
- wget
- fail2ban
- ufw
# Docker repository setup
apt:
sources:
docker:
source: "deb [arch=amd64] https://download.docker.com/linux/ubuntu $RELEASE stable"
keyid: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88
# System configuration files
write_files:
# Docker daemon configuration
- path: /etc/docker/daemon.json
content: |
{
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"live-restore": true,
"userland-proxy": false,
"no-new-privileges": true,
"seccomp-profile": "/etc/docker/seccomp.json",
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 64000,
"Soft": 64000
}
},
"features": {
"buildkit": true
}
}
owner: root:root
permissions: '0644'
# Docker security profile
- path: /etc/docker/seccomp.json
content: |
{
"defaultAction": "SCMP_ACT_ERRNO",
"archMap": [
{
"architecture": "SCMP_ARCH_X86_64",
"subArchitectures": [
"SCMP_ARCH_X86",
"SCMP_ARCH_X32"
]
}
],
"syscalls": [
{
"names": [
"accept",
"accept4",
"access",
"adjtimex",
"alarm",
"bind",
"brk",
"capget",
"capset",
"chdir",
"chmod",
"chown",
"chown32",
"clock_getres",
"clock_gettime",
"clock_nanosleep",
"close",
"connect",
"copy_file_range",
"creat",
"dup",
"dup2",
"dup3",
"epoll_create",
"epoll_create1",
"epoll_ctl",
"epoll_pwait",
"epoll_wait",
"eventfd",
"eventfd2",
"execve",
"execveat",
"exit",
"exit_group",
"faccessat",
"fadvise64",
"fadvise64_64",
"fallocate",
"fanotify_mark",
"fchdir",
"fchmod",
"fchmodat",
"fchown",
"fchown32",
"fchownat",
"fcntl",
"fcntl64",
"fdatasync",
"fgetxattr",
"flistxattr",
"flock",
"fork",
"fremovexattr",
"fsetxattr",
"fstat",
"fstat64",
"fstatat64",
"fstatfs",
"fstatfs64",
"fsync",
"ftruncate",
"ftruncate64",
"futex",
"getcwd",
"getdents",
"getdents64",
"getegid",
"getegid32",
"geteuid",
"geteuid32",
"getgid",
"getgid32",
"getgroups",
"getgroups32",
"getitimer",
"getpeername",
"getpgid",
"getpgrp",
"getpid",
"getppid",
"getpriority",
"getrandom",
"getresgid",
"getresgid32",
"getresuid",
"getresuid32",
"getrlimit",
"get_robust_list",
"getrusage",
"getsid",
"getsockname",
"getsockopt",
"get_thread_area",
"gettid",
"gettimeofday",
"getuid",
"getuid32",
"getxattr",
"inotify_add_watch",
"inotify_init",
"inotify_init1",
"inotify_rm_watch",
"io_cancel",
"ioctl",
"io_destroy",
"io_getevents",
"ioprio_get",
"ioprio_set",
"io_setup",
"io_submit",
"ipc",
"kill",
"lchown",
"lchown32",
"lgetxattr",
"link",
"linkat",
"listen",
"listxattr",
"llistxattr",
"lremovexattr",
"lseek",
"lsetxattr",
"lstat",
"lstat64",
"madvise",
"memfd_create",
"mincore",
"mkdir",
"mkdirat",
"mknod",
"mknodat",
"mlock",
"mlock2",
"mlockall",
"mmap",
"mmap2",
"mprotect",
"mq_getsetattr",
"mq_notify",
"mq_open",
"mq_timedreceive",
"mq_timedsend",
"mq_unlink",
"mremap",
"msgctl",
"msgget",
"msgrcv",
"msgsnd",
"msync",
"munlock",
"munlockall",
"munmap",
"nanosleep",
"newfstatat",
"open",
"openat",
"pause",
"pipe",
"pipe2",
"poll",
"ppoll",
"prctl",
"pread64",
"preadv",
"prlimit64",
"pselect6",
"ptrace",
"pwrite64",
"pwritev",
"read",
"readahead",
"readlink",
"readlinkat",
"readv",
"recv",
"recvfrom",
"recvmmsg",
"recvmsg",
"remap_file_pages",
"removexattr",
"rename",
"renameat",
"renameat2",
"restart_syscall",
"rmdir",
"rt_sigaction",
"rt_sigpending",
"rt_sigprocmask",
"rt_sigqueueinfo",
"rt_sigreturn",
"rt_sigsuspend",
"rt_sigtimedwait",
"rt_tgsigqueueinfo",
"sched_getaffinity",
"sched_getattr",
"sched_getparam",
"sched_get_priority_max",
"sched_get_priority_min",
"sched_getscheduler",
"sched_rr_get_interval",
"sched_setaffinity",
"sched_setattr",
"sched_setparam",
"sched_setscheduler",
"sched_yield",
"seccomp",
"select",
"semctl",
"semget",
"semop",
"semtimedop",
"send",
"sendfile",
"sendfile64",
"sendmmsg",
"sendmsg",
"sendto",
"setfsgid",
"setfsgid32",
"setfsuid",
"setfsuid32",
"setgid",
"setgid32",
"setgroups",
"setgroups32",
"setitimer",
"setpgid",
"setpriority",
"setregid",
"setregid32",
"setresgid",
"setresgid32",
"setresuid",
"setresuid32",
"setreuid",
"setreuid32",
"setrlimit",
"set_robust_list",
"setsid",
"setsockopt",
"set_thread_area",
"set_tid_address",
"setuid",
"setuid32",
"setxattr",
"shmat",
"shmctl",
"shmdt",
"shmget",
"shutdown",
"sigaltstack",
"signalfd",
"signalfd4",
"sigreturn",
"socket",
"socketcall",
"socketpair",
"splice",
"stat",
"stat64",
"statfs",
"statfs64",
"statx",
"symlink",
"symlinkat",
"sync",
"sync_file_range",
"syncfs",
"sysinfo",
"tee",
"tgkill",
"time",
"timer_create",
"timer_delete",
"timer_getoverrun",
"timer_gettime",
"timer_settime",
"times",
"tkill",
"truncate",
"truncate64",
"ugetrlimit",
"umask",
"uname",
"unlink",
"unlinkat",
"utime",
"utimensat",
"utimes",
"vfork",
"vmsplice",
"wait4",
"waitid",
"waitpid",
"write",
"writev"
],
"action": "SCMP_ACT_ALLOW"
}
]
}
owner: root:root
permissions: '0644'
# Docker Compose configuration
- path: /opt/docker-compose.yml
content: |
version: '3.8'
services:
# Nginx reverse proxy
nginx:
image: nginx:alpine
container_name: nginx-proxy
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/ssl:/etc/nginx/ssl:ro
- nginx_logs:/var/log/nginx
networks:
- web
restart: unless-stopped
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
read_only: true
tmpfs:
- /var/cache/nginx:noexec,nosuid,size=100m
- /var/run:noexec,nosuid,size=100m
# Application container
app:
image: node:18-alpine
container_name: web-app
working_dir: /app
volumes:
- ./app:/app:ro
- app_data:/app/data
networks:
- web
- backend
environment:
- NODE_ENV=production
- PORT=3000
command: ["npm", "start"]
restart: unless-stopped
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
read_only: true
tmpfs:
- /tmp:noexec,nosuid,size=100m
user: "1000:1000"
# Redis cache
redis:
image: redis:7-alpine
container_name: redis-cache
volumes:
- redis_data:/data
- ./redis/redis.conf:/usr/local/etc/redis/redis.conf:ro
networks:
- backend
command: ["redis-server", "/usr/local/etc/redis/redis.conf"]
restart: unless-stopped
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
read_only: true
tmpfs:
- /tmp:noexec,nosuid,size=100m
# PostgreSQL database
postgres:
image: postgres:15-alpine
container_name: postgres-db
environment:
- POSTGRES_DB=appdb
- POSTGRES_USER=appuser
- POSTGRES_PASSWORD_FILE=/run/secrets/db_password
volumes:
- postgres_data:/var/lib/postgresql/data
- ./postgres/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
networks:
- backend
restart: unless-stopped
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
secrets:
- db_password
# Monitoring with Prometheus
prometheus:
image: prom/prometheus:latest
container_name: prometheus
ports:
- "9090:9090"
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
networks:
- monitoring
- web # so Prometheus can reach the nginx and app scrape targets
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
restart: unless-stopped
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
user: "65534:65534"
# Grafana dashboard
grafana:
image: grafana/grafana:latest
container_name: grafana
ports:
- "3000:3000"
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/provisioning:/etc/grafana/provisioning:ro
networks:
- monitoring
environment:
- GF_SECURITY_ADMIN_PASSWORD_FILE=/run/secrets/grafana_password
- GF_USERS_ALLOW_SIGN_UP=false
- GF_SERVER_DOMAIN=localhost
- GF_SMTP_ENABLED=false
- GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource
restart: unless-stopped
security_opt:
- no-new-privileges:true
cap_drop:
- ALL
secrets:
- grafana_password
user: "472:472"
# Networks
networks:
web:
driver: bridge
backend:
driver: bridge
internal: true
monitoring:
driver: bridge
# Volumes
volumes:
nginx_logs:
app_data:
redis_data:
postgres_data:
prometheus_data:
grafana_data:
# Secrets
secrets:
db_password:
file: ./secrets/db_password.txt
grafana_password:
file: ./secrets/grafana_password.txt
owner: root:root
permissions: '0644'
# Nginx configuration
- path: /opt/nginx/nginx.conf
content: |
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
use epoll;
multi_accept on;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
# Logging
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
# Performance
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
server_tokens off;
# Gzip compression
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_types
text/plain
text/css
text/xml
text/javascript
application/json
application/javascript
application/xml+rss
application/atom+xml
image/svg+xml;
# Rate limiting
limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s;
# Upstream backend
upstream app_backend {
server app:3000;
keepalive 32;
}
server {
listen 80;
server_name localhost;
# Security
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
# API endpoints
location /api/ {
limit_req zone=api burst=20 nodelay;
proxy_pass http://app_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
}
# Static files
location /static/ {
expires 1y;
add_header Cache-Control "public, immutable";
access_log off;
}
# Health check
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
# Default location
location / {
proxy_pass http://app_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
}
}
}
owner: root:root
permissions: '0644'
# Redis configuration
- path: /opt/redis/redis.conf
content: |
# Network
bind 0.0.0.0
port 6379
timeout 300
tcp-keepalive 60
# General
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
# Snapshotting
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
# Security
requirepass changeme123
rename-command FLUSHDB ""
rename-command FLUSHALL ""
rename-command DEBUG ""
rename-command CONFIG ""
# Memory management
maxmemory 256mb
maxmemory-policy allkeys-lru
# Append only file
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
owner: root:root
permissions: '0644'
# PostgreSQL initialization
- path: /opt/postgres/init.sql
content: |
-- The appdb database and appuser role are created by the container's
-- POSTGRES_DB / POSTGRES_USER / POSTGRES_PASSWORD_FILE environment variables,
-- so this script only creates the schema objects.
\c appdb;
-- Create application tables
CREATE TABLE users (
id SERIAL PRIMARY KEY,
username VARCHAR(50) UNIQUE NOT NULL,
email VARCHAR(100) UNIQUE NOT NULL,
password_hash VARCHAR(255) NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
CREATE TABLE sessions (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
user_id INTEGER REFERENCES users(id) ON DELETE CASCADE,
token VARCHAR(255) UNIQUE NOT NULL,
expires_at TIMESTAMP NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create indexes
CREATE INDEX idx_users_username ON users(username);
CREATE INDEX idx_users_email ON users(email);
CREATE INDEX idx_sessions_token ON sessions(token);
CREATE INDEX idx_sessions_expires_at ON sessions(expires_at);
-- Grant permissions
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO appuser;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO appuser;
owner: root:root
permissions: '0644'
# Prometheus configuration
- path: /opt/prometheus/prometheus.yml
content: |
global:
scrape_interval: 15s
evaluation_interval: 15s
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'node-exporter'
static_configs:
- targets: ['host.docker.internal:9100']
- job_name: 'docker'
static_configs:
- targets: ['host.docker.internal:9323']
- job_name: 'nginx'
static_configs:
- targets: ['nginx:80']
metrics_path: /metrics
- job_name: 'app'
static_configs:
- targets: ['app:3000']
metrics_path: /metrics
owner: root:root
permissions: '0644'
# Docker setup script
- path: /opt/docker-setup.sh
content: |
#!/bin/bash
set -euo pipefail
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
log() {
echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')] $1${NC}"
}
warn() {
echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING: $1${NC}"
}
error() {
echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR: $1${NC}"
}
log "Starting Docker setup..."
# Install Docker
log "Installing Docker..."
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
# Start and enable Docker
log "Starting Docker service..."
systemctl start docker
systemctl enable docker
# Add ubuntu user to docker group
log "Adding ubuntu user to docker group..."
usermod -aG docker ubuntu
# Install Docker Compose standalone
log "Installing Docker Compose..."
curl -L "https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Create application directories
log "Creating application directories..."
mkdir -p /opt/{nginx,redis,postgres,prometheus,grafana,secrets,app}
# Set proper permissions
chown -R ubuntu:ubuntu /opt
chmod 755 /opt
# Generate secrets
log "Generating secrets..."
openssl rand -base64 32 > /opt/secrets/db_password.txt
openssl rand -base64 32 > /opt/secrets/grafana_password.txt
chmod 600 /opt/secrets/*.txt
chown ubuntu:ubuntu /opt/secrets/*.txt
# Create sample application
log "Creating sample Node.js application..."
cat > /opt/app/package.json << 'EOF'
{
"name": "docker-demo-app",
"version": "1.0.0",
"description": "Demo application for Docker containerization",
"main": "server.js",
"scripts": {
"start": "node server.js"
},
"dependencies": {
"express": "^4.18.2",
"redis": "^4.6.5",
"pg": "^8.10.0",
"prom-client": "^14.2.0"
}
}
EOF
cat > /opt/app/server.js << 'EOF'
const express = require('express');
const redis = require('redis');
const { Pool } = require('pg');
const client = require('prom-client');
const app = express();
const port = process.env.PORT || 3000;
// Prometheus metrics
const register = new client.Registry();
client.collectDefaultMetrics({ register });
const httpRequestsTotal = new client.Counter({
name: 'http_requests_total',
help: 'Total number of HTTP requests',
labelNames: ['method', 'route', 'status_code'],
registers: [register]
});
// Redis client (node-redis v4 takes a connection URL and needs an explicit connect)
const redisClient = redis.createClient({
url: 'redis://:changeme123@redis:6379'
});
redisClient.on('error', (err) => console.error('Redis error', err));
redisClient.connect().catch((err) => console.error('Redis connect failed', err));
// PostgreSQL client (the password must match the secret generated in
// /opt/secrets/db_password.txt, e.g. injected as DB_PASSWORD)
const pgPool = new Pool({
user: 'appuser',
host: 'postgres',
database: 'appdb',
password: process.env.DB_PASSWORD || 'changeme123',
port: 5432,
});
app.use(express.json());
// Middleware to track requests
app.use((req, res, next) => {
res.on('finish', () => {
httpRequestsTotal.inc({
method: req.method,
route: req.route?.path || req.path,
status_code: res.statusCode
});
});
next();
});
// Health check endpoint
app.get('/health', (req, res) => {
res.status(200).json({ status: 'healthy', timestamp: new Date().toISOString() });
});
// Metrics endpoint
app.get('/metrics', async (req, res) => {
res.set('Content-Type', register.contentType);
res.end(await register.metrics());
});
// Redis test endpoint
app.get('/redis', async (req, res) => {
try {
await redisClient.set('test', 'Hello from Redis!');
const value = await redisClient.get('test');
res.json({ redis: value });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
// PostgreSQL test endpoint
app.get('/postgres', async (req, res) => {
try {
const result = await pgPool.query('SELECT NOW() as current_time');
res.json({ postgres: result.rows[0] });
} catch (error) {
res.status(500).json({ error: error.message });
}
});
// Root endpoint
app.get('/', (req, res) => {
res.json({
message: 'Docker Containerization Demo',
version: '1.0.0',
timestamp: new Date().toISOString(),
endpoints: {
health: '/health',
metrics: '/metrics',
redis: '/redis',
postgres: '/postgres'
}
});
});
app.listen(port, '0.0.0.0', () => {
console.log('Server running on port ' + port);
});
EOF
# Set ownership
chown -R 1000:1000 /opt/app
# Configure firewall (add rules before enabling so SSH access is never dropped)
log "Configuring firewall..."
ufw default deny incoming
ufw default allow outgoing
ufw allow ssh
ufw allow 80/tcp
ufw allow 443/tcp
ufw allow 3000/tcp
ufw allow 9090/tcp
ufw --force enable
# Configure fail2ban
log "Configuring fail2ban..."
systemctl enable fail2ban
systemctl start fail2ban
# Install Node.js dependencies before starting the stack
# (the app source is mounted read-only inside the compose service)
log "Installing Node.js dependencies..."
docker run --rm -v /opt/app:/app -w /app node:18-alpine npm install
chown -R 1000:1000 /opt/app
# Start containers
log "Starting Docker containers..."
cd /opt
docker-compose up -d
# Wait for services to start
log "Waiting for services to start..."
sleep 30
# Display status
log "Docker setup completed!"
echo "=========================================="
echo "Docker Containerization Setup Complete!"
echo "=========================================="
echo "Services:"
echo "- Web Application: http://$(curl -s ifconfig.me):80"
echo "- Prometheus: http://$(curl -s ifconfig.me):9090"
echo "- Grafana: http://$(curl -s ifconfig.me):3000"
echo ""
echo "Credentials:"
echo "- Grafana Admin: admin / $(cat /opt/secrets/grafana_password.txt)"
echo "- Database Password: $(cat /opt/secrets/db_password.txt)"
echo ""
echo "Container Status:"
docker-compose ps
echo ""
echo "Logs: docker-compose logs -f"
echo "=========================================="
log "Setup completed successfully!"
owner: root:root
permissions: '0755'
# User configuration
users:
- name: ubuntu
groups: [adm, cdrom, sudo, dip, plugdev, lxd, docker]
shell: /bin/bash
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC... # Replace with your SSH public key
# Commands to run
runcmd:
- systemctl daemon-reload
- /opt/docker-setup.sh > /var/log/docker-setup.log 2>&1
# Final message
final_message: |
Docker Containerization setup completed!
Access your services:
- Web App: http://<server-ip>:80
- Prometheus: http://<server-ip>:9090
- Grafana: http://<server-ip>:3000
Management:
- SSH: ssh ubuntu@<server-ip>
- Logs: docker-compose logs -f
- Status: docker-compose ps
Setup log: /var/log/docker-setup.log
System ready at $TIMESTAMP
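Before wiring this file into Terraform, it is worth validating it locally. A minimal sketch, assuming the file is saved as docker-cloud-init.yaml; recent cloud-init releases ship a schema subcommand, and a plain YAML parse is a reasonable fallback:
# Validate the cloud-config before launching an instance with it.
cloud-init schema --config-file docker-cloud-init.yaml
# Fallback: confirm the file is at least well-formed YAML
python3 -c "import yaml; yaml.safe_load(open('docker-cloud-init.yaml')); print('YAML OK')"
# After boot, on the instance, confirm cloud-init finished without errors
cloud-init status --wait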
Terraform Infrastructure Configuration
Complete Terraform configuration for deploying the Docker containerization environment
Infrastructure as Code
This Terraform configuration creates a complete VPC, security groups, and EC2 instance optimized for Docker workloads.
# Docker Containerization Terraform Configuration
# Author: RFS Security Research
# Version: 1.0
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
}
}
provider "aws" {
region = var.aws_region
}
# Variables
variable "aws_region" {
description = "AWS region for resources"
type = string
default = "us-west-2"
}
variable "instance_name" {
description = "Name of the Docker instance"
type = string
default = "docker-containerization"
}
variable "instance_type" {
description = "EC2 instance type"
type = string
default = "t3.medium"
}
variable "ssh_public_key" {
description = "SSH public key for EC2 access"
type = string
}
variable "allowed_cidr_blocks" {
description = "CIDR blocks allowed to access the instance"
type = list(string)
default = ["0.0.0.0/0"]
}
# Data sources
data "aws_availability_zones" "available" {
state = "available"
}
data "aws_ami" "ubuntu" {
most_recent = true
owners = ["099720109477"] # Canonical
filter {
name = "name"
values = ["ubuntu/images/hvm-ssd/ubuntu-22.04-amd64-server-*"]
}
filter {
name = "virtualization-type"
values = ["hvm"]
}
}
# VPC and Networking
resource "aws_vpc" "docker_vpc" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "${var.instance_name}-vpc"
}
}
resource "aws_internet_gateway" "docker_igw" {
vpc_id = aws_vpc.docker_vpc.id
tags = {
Name = "${var.instance_name}-igw"
}
}
resource "aws_subnet" "docker_subnet" {
vpc_id = aws_vpc.docker_vpc.id
cidr_block = "10.0.1.0/24"
availability_zone = data.aws_availability_zones.available.names[0]
map_public_ip_on_launch = true
tags = {
Name = "${var.instance_name}-subnet"
}
}
resource "aws_route_table" "docker_rt" {
vpc_id = aws_vpc.docker_vpc.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.docker_igw.id
}
tags = {
Name = "${var.instance_name}-rt"
}
}
resource "aws_route_table_association" "docker_rta" {
subnet_id = aws_subnet.docker_subnet.id
route_table_id = aws_route_table.docker_rt.id
}
# Security Group
resource "aws_security_group" "docker_sg" {
name = "${var.instance_name}-sg"
description = "Security group for Docker containerization instance"
vpc_id = aws_vpc.docker_vpc.id
# SSH access
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = var.allowed_cidr_blocks
}
# HTTP access
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = var.allowed_cidr_blocks
}
# HTTPS access
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = var.allowed_cidr_blocks
}
# Grafana (published on host port 3000)
ingress {
from_port = 3000
to_port = 3000
protocol = "tcp"
cidr_blocks = var.allowed_cidr_blocks
}
# Prometheus
ingress {
from_port = 9090
to_port = 9090
protocol = "tcp"
cidr_blocks = var.allowed_cidr_blocks
}
# All outbound traffic
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.instance_name}-sg"
}
}
# Key Pair
resource "aws_key_pair" "docker_key" {
key_name = "${var.instance_name}-key"
public_key = var.ssh_public_key
}
# EC2 Instance
resource "aws_instance" "docker_instance" {
ami = data.aws_ami.ubuntu.id
instance_type = var.instance_type
key_name = aws_key_pair.docker_key.key_name
vpc_security_group_ids = [aws_security_group.docker_sg.id]
subnet_id = aws_subnet.docker_subnet.id
# The cloud-init file contains literal shell ${...} expansions and no template
# variables, so it is passed through as-is rather than rendered with templatefile()
user_data = base64encode(file("${path.module}/docker-cloud-init.yaml"))
root_block_device {
volume_type = "gp3"
volume_size = 30
encrypted = true
}
tags = {
Name = var.instance_name
Type = "Docker Containerization"
}
}
# Outputs
output "instance_public_ip" {
description = "Public IP address of the Docker instance"
value = aws_instance.docker_instance.public_ip
}
output "instance_private_ip" {
description = "Private IP address of the Docker instance"
value = aws_instance.docker_instance.private_ip
}
output "ssh_command" {
description = "SSH command to connect to the instance"
value = "ssh -i ~/.ssh/id_rsa ubuntu@${aws_instance.docker_instance.public_ip}"
}
output "web_application_url" {
description = "URL to access the web application"
value = "http://${aws_instance.docker_instance.public_ip}"
}
output "prometheus_url" {
description = "URL to access Prometheus"
value = "http://${aws_instance.docker_instance.public_ip}:9090"
}
output "grafana_url" {
description = "URL to access Grafana"
value = "http://${aws_instance.docker_instance.public_ip}:3000"
}
Deployment Steps
Follow these steps to deploy your Docker containerization environment
1
Prepare Terraform Files
Create the necessary files for your deployment:
mkdir docker-containerization && cd docker-containerization
# Save the Terraform configuration as main.tf
# Save Docker Cloud-Init as docker-cloud-init.yaml
2
Configure Variables
Create a terraform.tfvars file with your configuration:
aws_region = "us-west-2"
instance_name = "docker-demo"
instance_type = "t3.medium"
ssh_public_key = "ssh-rsa AAAAB3NzaC1yc2E..."
allowed_cidr_blocks = ["0.0.0.0/0"]
3
Deploy Infrastructure
Initialize and apply Terraform configuration:
terraform init
terraform plan
terraform apply
4
Verify Deployment
Check that all services are running correctly:
# SSH to the instance
ssh ubuntu@<instance-ip>
# Check Docker containers (the compose project lives in /opt)
cd /opt && docker-compose ps
# View logs
docker-compose logs -f
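Beyond container status, a quick smoke test of the HTTP endpoints confirms the stack is wired together end to end; run this from your workstation with <instance-ip> taken from the Terraform outputs:
# Smoke-test the deployed services.
IP=<instance-ip>
curl -s http://$IP/health             # nginx -> app health check
curl -s http://$IP/redis              # app -> Redis round trip
curl -s http://$IP/postgres           # app -> PostgreSQL round trip
curl -s http://$IP:9090/-/ready       # Prometheus readiness
curl -s -o /dev/null -w "%{http_code}\n" http://$IP:3000/login   # Grafana login page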
Security Features
Built-in security controls and hardening measures
Container Security
- Seccomp security profiles
- No new privileges flag
- Capability dropping
- Read-only root filesystems
- Non-root user execution
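These controls can be spot-checked on a running host with docker inspect; a sketch using the container names from the compose file above:
# Verify the hardening options actually applied to each running container.
for c in nginx-proxy web-app redis-cache postgres-db; do
  echo "== $c =="
  docker inspect "$c" --format \
    'ReadOnlyRootfs={{.HostConfig.ReadonlyRootfs}} CapDrop={{.HostConfig.CapDrop}} SecurityOpt={{.HostConfig.SecurityOpt}} User={{.Config.User}}'
done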
Network Security
- Network isolation
- Internal networks for backend
- UFW firewall configuration
- Fail2ban intrusion prevention
- Rate limiting
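The host-level layers can be verified on the instance; a sketch (the backend network name assumes Compose's default <project>_<network> naming, with the project taken from the /opt directory):
# Host firewall and intrusion prevention
sudo ufw status verbose
sudo fail2ban-client status sshd
# Confirm the backend network is internal (no route to the outside world)
docker network ls
docker network inspect opt_backend --format '{{.Name}} internal={{.Internal}}'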
Monitoring and Observability
Built-in monitoring stack with Prometheus and Grafana
Prometheus Metrics
- Container resource usage
- Application performance metrics
- HTTP request tracking
- Database connection monitoring
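These metrics can be pulled through the Prometheus HTTP API as well as the UI; a sketch, with <instance-ip> from the Terraform outputs:
# Query the Prometheus HTTP API for the metrics listed above.
IP=<instance-ip>
# Per-target scrape health (1 = up)
curl -s "http://$IP:9090/api/v1/query" --data-urlencode 'query=up'
# Requests recorded by the sample app, broken down by route
curl -s "http://$IP:9090/api/v1/query" \
  --data-urlencode 'query=sum by (route) (http_requests_total)'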
Grafana Dashboards
- Real-time system overview
- Container health status
- Performance analytics
- Alert management
Access Information
After deployment, access Prometheus at :9090 and Grafana at :3000. Log in to Grafana as admin with the password generated during setup (stored on the instance in /opt/secrets/grafana_password.txt).
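The admin password lives on the instance, and the Prometheus data source can be registered through Grafana's HTTP API instead of the UI; a sketch run on the instance as the ubuntu user:
# Fetch the generated admin password and add Prometheus as a data source.
GRAFANA_PASS=$(cat /opt/secrets/grafana_password.txt)
curl -s -u "admin:${GRAFANA_PASS}" -H "Content-Type: application/json" \
  -X POST http://localhost:3000/api/datasources \
  -d '{"name":"Prometheus","type":"prometheus","url":"http://prometheus:9090","access":"proxy","isDefault":true}'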
Troubleshooting
Common issues and their solutions
Container Issues
Problem: Containers not starting
Solution:
docker-compose logs <service-name>
docker-compose restart <service-name>
Network Connectivity
Problem: Services cannot communicate
Solution:
docker network ls
docker-compose exec <service> ping <other-service>
Performance Issues
Problem: High resource usage
Solution:
docker stats
docker-compose top
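If problems show up right after provisioning, confirm that cloud-init and the setup script finished cleanly before digging into individual containers; a sketch run on the instance:
# Diagnose a failed or incomplete first boot.
cloud-init status --long
sudo tail -n 50 /var/log/cloud-init-output.log
sudo tail -n 50 /var/log/docker-setup.log     # output of /opt/docker-setup.sh
# Docker daemon health
systemctl status docker --no-pager
sudo journalctl -u docker --since "30 min ago" --no-pager | tail -n 50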
Quick Info
Difficulty
Intermediate
Time Required
30-45 minutes
Cost Estimate
$20-40/month
Instance Type
t3.medium
Services Included
Nginx (Reverse Proxy)
Node.js Application
Redis Cache
PostgreSQL Database
Prometheus Monitoring
Grafana Dashboard
Related Guides