Skip to main content

Advanced Examples

Explore complex real-world scenarios and advanced use cases for gVisor in production environments.

Multi-Tier Web Application

Deploy a complete web application stack with gVisor for enhanced security.

Application Architecture

# Frontend Service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: frontend
  namespace: secure-webapp
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
      tier: web
  template:
    metadata:
      labels:
        app: frontend
        tier: web
    spec:
      runtimeClassName: gvisor
      containers:
        - name: nginx
          image: nginx:1.21-alpine
          ports:
            - containerPort: 80
          volumeMounts:
            - name: config
              mountPath: /etc/nginx/conf.d
            - name: static-content
              mountPath: /usr/share/nginx/html
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "200m"
      volumes:
        - name: config
          configMap:
            name: nginx-config
        - name: static-content
          emptyDir: {}
---
# API Service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api-server
  namespace: secure-webapp
spec:
  replicas: 2
  selector:
    matchLabels:
      app: api-server
      tier: api
  template:
    metadata:
      labels:
        app: api-server
        tier: api
    spec:
      runtimeClassName: gvisor
      containers:
        - name: api
          image: node:16-alpine
          command: ["node", "server.js"]
          ports:
            - containerPort: 3000
          env:
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: db-credentials
                  key: connection-string
            - name: REDIS_URL
              value: "redis://redis-service:6379"
            - name: NODE_ENV
              value: "production"
          volumeMounts:
            - name: app-code
              mountPath: /app
          workingDir: /app
          resources:
            requests:
              memory: "512Mi"
              cpu: "200m"
            limits:
              memory: "1Gi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: app-code
          configMap:
            name: api-code
---
# Database
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
  namespace: secure-webapp
spec:
  serviceName: postgres
  replicas: 1
  selector:
    matchLabels:
      app: postgres
      tier: database
  template:
    metadata:
      labels:
        app: postgres
        tier: database
    spec:
      runtimeClassName: gvisor-kvm # Use KVM for database performance
      containers:
        - name: postgres
          image: postgres:14-alpine
          ports:
            - containerPort: 5432
          env:
            - name: POSTGRES_DB
              value: webapp
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: db-credentials
                  key: username
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-credentials
                  key: password
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
          volumeMounts:
            - name: postgres-data
              mountPath: /var/lib/postgresql/data
            - name: postgres-config
              mountPath: /etc/postgresql/postgresql.conf
              subPath: postgresql.conf
          resources:
            requests:
              memory: "1Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          livenessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - webapp
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - webapp
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: postgres-config
          configMap:
            name: postgres-config
  volumeClaimTemplates:
    - metadata:
        name: postgres-data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 20Gi

Supporting Services

# Redis Cache
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: secure-webapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      tier: cache
  template:
    metadata:
      labels:
        app: redis
        tier: cache
    spec:
      runtimeClassName: gvisor
      containers:
        - name: redis
          image: redis:7-alpine
          ports:
            - containerPort: 6379
          command: ["redis-server", "/etc/redis/redis.conf"]
          volumeMounts:
            - name: redis-config
              mountPath: /etc/redis
            - name: redis-data
              mountPath: /data
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "200m"
      volumes:
        - name: redis-config
          configMap:
            name: redis-config
        - name: redis-data
          emptyDir: {}
---
# Services
apiVersion: v1
kind: Service
metadata:
  name: frontend-service
  namespace: secure-webapp
spec:
  selector:
    app: frontend
    tier: web
  ports:
    - port: 80
      targetPort: 80
  type: LoadBalancer
---
apiVersion: v1
kind: Service
metadata:
  name: api-service
  namespace: secure-webapp
spec:
  selector:
    app: api-server
    tier: api
  ports:
    - port: 3000
      targetPort: 3000
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: postgres-service
  namespace: secure-webapp
spec:
  selector:
    app: postgres
    tier: database
  ports:
    - port: 5432
      targetPort: 5432
  type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
  name: redis-service
  namespace: secure-webapp
spec:
  selector:
    app: redis
    tier: cache
  ports:
    - port: 6379
      targetPort: 6379
  type: ClusterIP

Secure CI/CD Pipeline

Implement a secure CI/CD pipeline using gVisor for build and deployment isolation.

Build Pipeline

# Secure Build Environment
apiVersion: v1
kind: Pod
metadata:
  name: secure-builder
  namespace: ci-cd
spec:
  runtimeClassName: gvisor
  restartPolicy: Never
  serviceAccountName: builder
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000
    fsGroup: 1000
  containers:
    - name: builder
      image: docker:20-dind
      command: ["/bin/sh"]
      args:
        - "-c"
        - |
          # Start Docker daemon in background
          dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2376 &

          # Wait for Docker to start
          sleep 5

          # Clone repository
          git clone $REPO_URL /workspace
          cd /workspace

          # Build application
          docker build -t $IMAGE_NAME:$BUILD_ID .

          # Security scan
          docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
            aquasec/trivy image $IMAGE_NAME:$BUILD_ID

          # Push to registry
          docker push $IMAGE_NAME:$BUILD_ID
      env:
        - name: REPO_URL
          value: "https://github.com/example/secure-app.git"
        - name: IMAGE_NAME
          value: "registry.example.com/secure-app"
        - name: BUILD_ID
          value: "build-123"
        - name: DOCKER_HOST
          value: "tcp://localhost:2376"
      volumeMounts:
        - name: docker-sock
          mountPath: /var/run/docker.sock
        - name: docker-certs
          mountPath: /certs
        - name: workspace
          mountPath: /workspace
      resources:
        requests:
          memory: "2Gi"
          cpu: "1000m"
        limits:
          memory: "4Gi"
          cpu: "2000m"
      # NOTE(review): docker-in-docker typically requires privileged mode;
      # this example presumably relies on the gVisor sandbox instead — verify
      # the daemon actually starts under these restrictions.
      securityContext:
        privileged: false
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
  volumes:
    - name: docker-sock
      emptyDir: {}
    - name: docker-certs
      emptyDir: {}
    - name: workspace
      emptyDir: {}

Deployment Pipeline

# Secure Deployment Job
apiVersion: batch/v1
kind: Job
metadata:
  name: secure-deployer
  namespace: ci-cd
spec:
  template:
    spec:
      runtimeClassName: gvisor
      restartPolicy: Never
      serviceAccountName: deployer
      containers:
        - name: deployer
          image: bitnami/kubectl:latest
          command: ["/bin/bash"]
          args:
            - "-c"
            - |
              # Validate deployment manifests
              # (--dry-run is an `apply` flag, so it must follow the subcommand)
              kubectl apply --dry-run=client -f /manifests/

              # Apply security policies
              kubectl apply -f /policies/

              # Deploy application
              kubectl apply -f /manifests/

              # Wait for rollout
              kubectl rollout status deployment/secure-app -n production

              # Run smoke tests
              /scripts/smoke-test.sh
          volumeMounts:
            - name: manifests
              mountPath: /manifests
            - name: policies
              mountPath: /policies
            - name: scripts
              mountPath: /scripts
            - name: kubeconfig
              mountPath: /root/.kube
          env:
            - name: KUBECONFIG
              value: "/root/.kube/config"
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "200m"
      volumes:
        - name: manifests
          configMap:
            name: deployment-manifests
        - name: policies
          configMap:
            name: security-policies
        - name: scripts
          configMap:
            name: test-scripts
            defaultMode: 0755 # YAML 1.1 octal -> 493 (rwxr-xr-x)
        - name: kubeconfig
          secret:
            secretName: kubeconfig

Microservices with Service Mesh

Deploy a microservices architecture with Istio service mesh and gVisor.

Service Mesh Configuration

# Enable Istio sidecar injection with gVisor
apiVersion: v1
kind: Namespace
metadata:
  name: microservices
  labels:
    istio-injection: enabled
    runtime: gvisor
---
# Custom sidecar injection template for gVisor
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  name: gvisor-mesh
  namespace: istio-system
spec:
  values:
    pilot:
      env:
        # Pilot env vars are a string map — keep boolean-looking values quoted
        EXTERNAL_ISTIOD: "false"
    global:
      proxy:
        runtimeClassName: gvisor
        resources:
          requests:
            cpu: 100m
            memory: 128Mi
          limits:
            cpu: 200m
            memory: 256Mi

Microservices Deployment

# User Service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: user-service
  namespace: microservices
spec:
  replicas: 2
  selector:
    matchLabels:
      app: user-service
      version: v1
  template:
    metadata:
      labels:
        app: user-service
        version: v1
    spec:
      runtimeClassName: gvisor
      containers:
        - name: user-service
          image: user-service:v1
          ports:
            - containerPort: 8080
          env:
            - name: SERVICE_NAME
              value: "user-service"
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: user-db-secret
                  key: url
          resources:
            requests:
              memory: "256Mi"
              cpu: "200m"
            limits:
              memory: "512Mi"
              cpu: "400m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8080
            initialDelaySeconds: 30
          readinessProbe:
            httpGet:
              path: /ready
              port: 8080
            initialDelaySeconds: 5
---
# Order Service
apiVersion: apps/v1
kind: Deployment
metadata:
  name: order-service
  namespace: microservices
spec:
  replicas: 3
  selector:
    matchLabels:
      app: order-service
      version: v1
  template:
    metadata:
      labels:
        app: order-service
        version: v1
    spec:
      runtimeClassName: gvisor
      containers:
        - name: order-service
          image: order-service:v1
          ports:
            - containerPort: 8080
          env:
            - name: SERVICE_NAME
              value: "order-service"
            - name: USER_SERVICE_URL
              value: "http://user-service:8080"
            - name: PAYMENT_SERVICE_URL
              value: "http://payment-service:8080"
          resources:
            requests:
              memory: "512Mi"
              cpu: "300m"
            limits:
              memory: "1Gi"
              cpu: "600m"
---
# Payment Service (PCI compliance)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: payment-service
  namespace: microservices
spec:
  replicas: 2
  selector:
    matchLabels:
      app: payment-service
      version: v1
  template:
    metadata:
      labels:
        app: payment-service
        version: v1
      annotations:
        sidecar.istio.io/proxyCPU: "200m"
        sidecar.istio.io/proxyMemory: "256Mi"
    spec:
      runtimeClassName: gvisor-kvm # Enhanced security for payment processing
      securityContext:
        runAsNonRoot: true
        runAsUser: 10000
        fsGroup: 10000
      containers:
        - name: payment-service
          image: payment-service:v1
          ports:
            - containerPort: 8080
          env:
            - name: SERVICE_NAME
              value: "payment-service"
            - name: ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: payment-secrets
                  key: encryption-key
          resources:
            requests:
              memory: "512Mi"
              cpu: "400m"
            limits:
              memory: "1Gi"
              cpu: "800m"
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: tmp
          emptyDir: {}

Traffic Management

# Virtual Service for routing
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: microservices-routes
  namespace: microservices
spec:
  hosts:
    - api.example.com
  gateways:
    - microservices-gateway
  http:
    - match:
        - uri:
            prefix: /users
      route:
        - destination:
            host: user-service
            port:
              number: 8080
      timeout: 10s
      retries:
        attempts: 3
        perTryTimeout: 3s
    - match:
        - uri:
            prefix: /orders
      route:
        - destination:
            host: order-service
            port:
              number: 8080
      timeout: 15s
    - match:
        - uri:
            prefix: /payments
      route:
        - destination:
            host: payment-service
            port:
              number: 8080
      timeout: 30s
      headers:
        request:
          add:
            x-security-context: "pci-compliant"
---
# Security policies
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: default
  namespace: microservices
spec:
  mtls:
    mode: STRICT
---
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
  name: payment-access
  namespace: microservices
spec:
  selector:
    matchLabels:
      app: payment-service
  rules:
    - from:
        - source:
            principals: ["cluster.local/ns/microservices/sa/order-service"]
      # `operation` constraints must live under `to:` in an AuthorizationPolicy rule
      to:
        - operation:
            methods: ["POST"]
            paths: ["/process-payment"]

Machine Learning Workloads

Deploy secure ML training and inference workloads using gVisor.

ML Training Job

# Secure ML Training Environment
apiVersion: batch/v1
kind: Job
metadata:
  name: secure-ml-training
  namespace: ml-platform
spec:
  parallelism: 1
  completions: 1
  template:
    spec:
      runtimeClassName: gvisor-kvm
      restartPolicy: Never
      containers:
        - name: trainer
          image: tensorflow/tensorflow:2.8.0-gpu
          command: ["python"]
          args: ["/workspace/train.py"]
          env:
            - name: CUDA_VISIBLE_DEVICES
              value: "0"
            - name: TF_CPP_MIN_LOG_LEVEL
              value: "2"
            - name: DATASET_PATH
              value: "/data/training"
            - name: MODEL_OUTPUT_PATH
              value: "/models/output"
            - name: CHECKPOINT_PATH
              value: "/checkpoints"
          volumeMounts:
            - name: workspace
              mountPath: /workspace
            - name: training-data
              mountPath: /data
              readOnly: true
            - name: model-output
              mountPath: /models
            - name: checkpoints
              mountPath: /checkpoints
            - name: dev-shm
              mountPath: /dev/shm
          resources:
            requests:
              memory: "8Gi"
              cpu: "4000m"
              nvidia.com/gpu: 1
            limits:
              memory: "16Gi"
              cpu: "8000m"
              nvidia.com/gpu: 1
          securityContext:
            runAsNonRoot: true
            runAsUser: 1000
            allowPrivilegeEscalation: false
      volumes:
        - name: workspace
          configMap:
            name: ml-training-code
        - name: training-data
          persistentVolumeClaim:
            claimName: training-data-pvc
        - name: model-output
          persistentVolumeClaim:
            claimName: model-output-pvc
        - name: checkpoints
          emptyDir:
            sizeLimit: 10Gi
        - name: dev-shm
          emptyDir:
            medium: Memory
            sizeLimit: 2Gi
      nodeSelector:
        accelerator: nvidia-tesla-v100
      tolerations:
        - key: nvidia.com/gpu
          operator: Exists
          effect: NoSchedule

ML Inference Service

# Secure ML Inference API
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ml-inference-api
  namespace: ml-platform
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ml-inference
      tier: api
  template:
    metadata:
      labels:
        app: ml-inference
        tier: api
    spec:
      runtimeClassName: gvisor
      containers:
        - name: inference-server
          image: ml-inference-server:v1.2
          ports:
            - containerPort: 8000
            - containerPort: 8001 # gRPC
          command: ["/opt/ml/serve"]
          args:
            - --model-store=/models
            - --models=model1=model1.tar.gz
            - --foreground
          env:
            - name: PYTHONUNBUFFERED
              value: "1"
            - name: OMP_NUM_THREADS
              value: "2"
          volumeMounts:
            - name: model-store
              mountPath: /models
              readOnly: true
            - name: inference-cache
              mountPath: /cache
          resources:
            requests:
              memory: "2Gi"
              cpu: "1000m"
            limits:
              memory: "4Gi"
              cpu: "2000m"
          livenessProbe:
            httpGet:
              path: /health
              port: 8000
            initialDelaySeconds: 60
            periodSeconds: 30
          readinessProbe:
            httpGet:
              path: /models
              port: 8000
            initialDelaySeconds: 30
            periodSeconds: 10
        - name: model-cache-warmer
          image: model-cache-warmer:latest
          env:
            - name: INFERENCE_ENDPOINT
              value: "http://localhost:8000"
            - name: WARM_UP_MODELS
              value: "model1"
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "256Mi"
              cpu: "200m"
      volumes:
        - name: model-store
          persistentVolumeClaim:
            claimName: ml-models-pvc
        - name: inference-cache
          emptyDir:
            sizeLimit: 5Gi
---
# Autoscaling for inference
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: ml-inference-hpa
  namespace: ml-platform
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: ml-inference-api
  minReplicas: 2
  maxReplicas: 20
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
    - type: Pods
      pods:
        metric:
          name: requests_per_second
        target:
          type: AverageValue
          averageValue: "100"

Serverless Computing Platform

Build a secure serverless platform using gVisor for function isolation.

Function Runtime

# Serverless Function Runtime
apiVersion: apps/v1
kind: Deployment
metadata:
  name: function-runner
  namespace: serverless
spec:
  replicas: 5
  selector:
    matchLabels:
      app: function-runner
  template:
    metadata:
      labels:
        app: function-runner
    spec:
      runtimeClassName: gvisor
      containers:
        - name: runner
          image: function-runtime:latest
          ports:
            - containerPort: 8080
          env:
            - name: FUNCTION_TIMEOUT
              value: "300"
            - name: MAX_MEMORY
              value: "512Mi"
            - name: RUNTIME_MODE
              value: "sandbox"
          resources:
            requests:
              memory: "1Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          securityContext:
            runAsNonRoot: true
            runAsUser: 65534 # nobody
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: function-code
              mountPath: /functions
              readOnly: true
            - name: tmp-execution
              mountPath: /tmp
            - name: var-tmp
              mountPath: /var/tmp
      volumes:
        - name: function-code
          emptyDir: {}
        - name: tmp-execution
          emptyDir:
            sizeLimit: 100Mi
        - name: var-tmp
          emptyDir:
            sizeLimit: 50Mi
      nodeSelector:
        function-node: "true"
---
# Function execution job template
apiVersion: batch/v1
kind: Job
metadata:
  name: execute-function-template
  namespace: serverless
spec:
  template:
    spec:
      runtimeClassName: gvisor
      restartPolicy: Never
      activeDeadlineSeconds: 300
      containers:
        - name: executor
          image: function-executor:latest
          command: ["/executor"]
          env:
            - name: FUNCTION_NAME
              value: "placeholder"
            - name: FUNCTION_LANG
              value: "nodejs"
            - name: EXECUTION_TIMEOUT
              value: "60"
          resources:
            requests:
              memory: "128Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          securityContext:
            runAsNonRoot: true
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            capabilities:
              drop:
                - ALL
          volumeMounts:
            - name: function-workspace
              mountPath: /workspace
            - name: tmp-dir
              mountPath: /tmp
      volumes:
        - name: function-workspace
          emptyDir:
            sizeLimit: 100Mi
        - name: tmp-dir
          emptyDir:
            sizeLimit: 50Mi

Multi-Cloud Deployment

Deploy applications across multiple cloud providers using gVisor for consistent security.

AWS EKS Configuration

# AWS-specific gVisor configuration
apiVersion: v1
kind: ConfigMap
metadata:
  name: gvisor-aws-config
  namespace: kube-system
data:
  runsc.toml: |
    [runsc_config]
    platform = "ptrace" # KVM may not be available in some instance types
    network = "sandbox"
    file-access = "shared"

    # AWS-optimized settings
    total-memory = "4GB"
    num-cpus = 2

    # EBS volume optimization
    overlay = true
    vfs2 = true
---
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor-aws
handler: runsc
overhead:
  podFixed:
    memory: "20Mi"
    cpu: "10m"
scheduling:
  # RuntimeClass scheduling uses `nodeSelector` (there is no `nodeClassMap` field)
  nodeSelector:
    aws-node-type: "secure"

GCP GKE Configuration

# GCP-specific gVisor configuration
apiVersion: v1
kind: ConfigMap
metadata:
  name: gvisor-gcp-config
  namespace: kube-system
data:
  runsc.toml: |
    [runsc_config]
    platform = "kvm" # KVM available on GCP
    network = "sandbox"
    file-access = "shared"

    # GCP-optimized settings
    total-memory = "8GB"
    num-cpus = 4
    huge-pages = true

    # Persistent disk optimization
    overlay2 = "root=/var/lib/gvisor/overlay"
---
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor-gcp
handler: runsc
overhead:
  podFixed:
    memory: "30Mi"
    cpu: "15m"

Cross-Cloud Application

# Application that runs consistently across clouds
apiVersion: apps/v1
kind: Deployment
metadata:
  name: multi-cloud-app
spec:
  replicas: 3
  selector:
    matchLabels:
      app: multi-cloud-app
  template:
    metadata:
      labels:
        app: multi-cloud-app
    spec:
      runtimeClassName: gvisor # Will use cloud-specific config
      containers:
        - name: app
          image: multi-cloud-app:v1
          ports:
            - containerPort: 8080
          env:
            - name: CLOUD_PROVIDER
              valueFrom:
                fieldRef:
                  fieldPath: metadata.labels['cloud-provider']
            - name: REGION
              valueFrom:
                fieldRef:
                  fieldPath: metadata.labels['region']
          resources:
            requests:
              memory: "256Mi"
              cpu: "200m"
            limits:
              memory: "512Mi"
              cpu: "400m"
      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              preference:
                matchExpressions:
                  - key: cloud-provider
                    operator: In
                    values: ["aws", "gcp", "azure"]

These advanced examples demonstrate how gVisor can be used in complex, production-grade scenarios while maintaining security, performance, and operational excellence. Each example includes proper resource management, security configurations, and monitoring considerations.

Next Steps

Explore additional topics: