Skip to content

Linux_Practical_Interview_1251-1500

Linux Practical Interview Questions (1251-1500)

Section titled “Linux Practical Interview Questions (1251-1500)”

Q1251: How do you configure kernel parameters at boot?

Section titled “Q1251: How do you configure kernel parameters at boot?”

Answer: Kernel parameters can be set at boot time through GRUB:

Terminal window
# Edit GRUB config
vim /etc/default/grub
# Add to GRUB_CMDLINE_LINUX
GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 crashkernel=auto"
# Regenerate GRUB
update-grub # Debian/Ubuntu
grub2-mkconfig -o /boot/grub2/grub.cfg # RHEL/CentOS
# View current parameters
cat /proc/cmdline
# Temporary changes (current session only)
sysctl -w parameter=value
# Permanent changes
# /etc/sysctl.conf or /etc/sysctl.d/99-custom.conf

Q1252: How do you tune network kernel parameters?

Section titled “Q1252: How do you tune network kernel parameters?”

Answer:

/etc/sysctl.conf
# Network core
net.core.somaxconn=65535
net.core.netdev_max_backlog=65535
net.core.rmem_max=16777216
net.core.wmem_max=16777216
net.core.optmem_max=25165824
# TCP tuning
net.ipv4.tcp_rmem=4096 87380 16777216
net.ipv4.tcp_wmem=4096 65536 16777216
net.ipv4.tcp_congestion_control=cubic
net.ipv4.tcp_fastopen=3
net.ipv4.tcp_max_syn_backlog=8192
net.ipv4.tcp_fin_timeout=15
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=60
net.ipv4.tcp_keepalive_probes=5
net.ipv4.tcp_tw_reuse=1
# Apply
sysctl -p

Q1253: How do you optimize memory management?

Section titled “Q1253: How do you optimize memory management?”

Answer:

/etc/sysctl.conf
# Virtual memory
vm.swappiness=10
vm.dirty_ratio=15
vm.dirty_background_ratio=5
vm.dirty_expire_centisecs=3000
vm.dirty_writeback_centisecs=500
vm.vfs_cache_pressure=50
vm.min_free_kbytes=65536
# Memory overcommit
vm.overcommit_memory=0
vm.overcommit_ratio=50
vm.oom_dump_tasks=1
vm.oom_kill_allocating_task=1
# Huge pages
vm.nr_hugepages=1024
# Transparent huge pages
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag

Q1254: How do you configure kernel modules at startup?

Section titled “Q1254: How do you configure kernel modules at startup?”

Answer:

/etc/modules
# Load module at boot
loop
st
8021q
# Module parameters
# /etc/modprobe.d/<module>.conf
options bonding mode=active-backup
options iptables conntrack_hashsize=262144
# Blacklist module
# /etc/modprobe.d/blacklist.conf
blacklist nouveau
blacklist snd_pcsp
# Create module dependencies
depmod -a
# View loaded modules
lsmod

Q1255: How do you implement kernel live patching?

Section titled “Q1255: How do you implement kernel live patching?”

Answer:

Terminal window
# Using kpatch (RHEL/CentOS)
yum install kpatch
kpatch install
# Create patch
# 1. Create patch file
# patch.diff
diff -Naur orig/file.c new/file.c
# 2. Build patch
kpatch build patch.diff
# 3. Apply
kpatch load kpatch-mypatch.ko
# Using livepatch (Ubuntu)
snap install canonical-livepatch
canonical-livepatch enable <token>
# Check status
canonical-livepatch status
kpatch list

Q1256: How do you configure HAProxy with SSL?

Section titled “Q1256: How do you configure HAProxy with SSL?”

Answer:

Terminal window
# Generate certificates
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
-keyout /etc/ssl/private/haproxy.key \
-out /etc/ssl/certs/haproxy.crt
# Combine to PEM
cat /etc/ssl/certs/haproxy.crt /etc/ssl/private/haproxy.key > /etc/haproxy/haproxy.pem
# HAProxy config
# /etc/haproxy/haproxy.cfg
frontend https_front
bind *:443 ssl crt /etc/haproxy/haproxy.pem
http-response set-header Strict-Transport-Security "max-age=31536000"
default_backend app_servers
backend app_servers
balance roundrobin
option httpchk GET /health
http-check expect status 200
server app1 192.168.1.10:8080 check inter 2000 rise 2 fall 3
server app2 192.168.1.11:8080 check inter 2000 rise 2 fall 3
# OCSP stapling: HAProxy loads the DER-encoded OCSP response from a file
# named <certificate>.ocsp next to the PEM (e.g. /etc/haproxy/haproxy.pem.ocsp)
frontend https_front
bind *:443 ssl crt /etc/haproxy/haproxy.pem ca-file /etc/ssl/certs/ca-certificates.crt

Q1257: How do you configure Nginx with HTTP/2?

Section titled “Q1257: How do you configure Nginx with HTTP/2?”

Answer:

/etc/nginx/nginx.conf
# Install nginx-extras for HTTP/2
apt install nginx-extras
http {
# HTTP/2 settings
server {
listen 443 ssl http2;
server_name example.com;
ssl_certificate /etc/ssl/certs/server.crt;
ssl_certificate_key /etc/ssl/private/server.key;
# SSL configuration
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256;
ssl_prefer_server_ciphers off;
# OCSP stapling
ssl_stapling on;
ssl_stapling_verify on;
location / {
proxy_pass http://backend;
}
}
}

Q1258: How do you configure Redis cluster?

Section titled “Q1258: How do you configure Redis cluster?”

Answer:

Terminal window
# Create cluster
redis-cli --cluster create 127.0.0.1:7001 127.0.0.1:7002 127.0.0.1:7003 \
127.0.0.1:7004 127.0.0.1:7005 127.0.0.1:7006 \
--cluster-replicas 1
# Check cluster
redis-cli -c -p 7001 cluster nodes
redis-cli -c -p 7001 cluster info
# Connect to cluster
redis-cli -c -p 7001
# Operations
CLUSTER INFO
CLUSTER SLOTS
CLUSTER NODES
# Failover
# Master election
redis-cli -p 7001 cluster failover
# Reshard
redis-cli --cluster reshard 127.0.0.1:7001

Q1259: How do you configure PostgreSQL replication?

Section titled “Q1259: How do you configure PostgreSQL replication?”

Answer:

/etc/postgresql/14/main/postgresql.conf
# Master configuration
wal_level = replica
max_wal_senders = 3
max_replication_slots = 3
wal_keep_size = 1GB
# /etc/postgresql/14/main/pg_hba.conf
host replication replicator 192.168.1.0/24 md5
# Create replication user
psql -U postgres
CREATE USER replicator REPLICATION LOGIN PASSWORD 'password';
# Backup for replica
pg_basebackup -h master -D /var/lib/postgresql/14/main -U replicator -P -X stream
# Replica configuration
# /etc/postgresql/14/main/postgresql.conf
hot_standby = on
# PostgreSQL 12+ (including 14) no longer uses recovery.conf.
# Create an empty standby.signal file in the data directory:
touch /var/lib/postgresql/14/main/standby.signal
# and set the connection info in postgresql.conf:
primary_conninfo = 'host=master port=5432 user=replicator password=password'

Q1260: How do you configure MySQL group replication?

Section titled “Q1260: How do you configure MySQL group replication?”

Answer:

/etc/mysql/mysql.conf.d/mysqld.cnf
# MySQL 8.0 Group Replication
[mysqld]
server-id=1
gtid_mode=ON
enforce_gtid_consistency=ON
binlog_checksum=NONE
log_slave_updates=ON
relay_log=relay-bin
binlog_format=ROW
transaction_write_set_extraction=XXHASH64
# Group Replication plugin
plugin_load_add='group_replication.so'
group_replication_group_name="aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
group_replication_start_on_boot=OFF
group_replication_local_address="127.0.0.1:33061"
group_replication_group_seeds="127.0.0.1:33061,127.0.0.1:33062,127.0.0.1:33063"
group_replication_bootstrap_group=OFF
# Bootstrap the group at runtime on the FIRST node only (never leave ON in the config):
SET GLOBAL group_replication_bootstrap_group=ON;
START GROUP_REPLICATION;
SET GLOBAL group_replication_bootstrap_group=OFF;

Q1261: How do you configure AppArmor?

Section titled “Q1261: How do you configure AppArmor?”

Answer:

Terminal window
# Install
apt install apparmor apparmor-utils
# Status
aa-status
apparmor_status
# Disable/enable profiles
aa-disable /usr/sbin/named
aa-enable /usr/sbin/named
# Create profile
aa-genprof /usr/bin/myapp
# Edit profile
vim /etc/apparmor.d/usr.bin.myapp
# Example profile
#include <tunables/global>
/usr/bin/myapp {
#include <abstractions/base>
#include <abstractions/bash>
/etc/myapp/** r,
/var/log/myapp/* rw,
/run/myapp.sock rw,
# Deny
deny /etc/shadow r,
}
# Reload
apparmor_parser -r /etc/apparmor.d/usr.bin.myapp
# Change to enforce/complain mode
aa-complain /usr/bin/myapp
aa-enforce /usr/bin/myapp

Q1262: How do you configure Fail2ban?

Section titled “Q1262: How do you configure Fail2ban?”

Answer:

Terminal window
# Install
apt install fail2ban
# Configure
# /etc/fail2ban/jail.local
[DEFAULT]
bantime = 3600
findtime = 600
maxretry = 5
destemail = admin@example.com
sender = fail2ban@example.com
action = %(action_mwl)s
[sshd]
enabled = true
port = ssh
filter = sshd
logpath = /var/log/auth.log
maxretry = 3
[nginx-http-auth]
enabled = true
filter = nginx-http-auth
port = http,https
logpath = /var/log/nginx/error.log
# Custom filter
# /etc/fail2ban/filter.d/myapp.conf
[Definition]
failregex = <HOST> - .* "GET /admin
ignoreregex =
# Test
fail2ban-regex /var/log/nginx/error.log /etc/fail2ban/filter.d/nginx-http-auth.conf
# Commands
fail2ban-client status
fail2ban-client set sshd unbanip 192.168.1.100

Q1263: How do you implement two-factor authentication?

Section titled “Q1263: How do you implement two-factor authentication?”

Answer:

Terminal window
# Install Google Authenticator PAM module
apt install libpam-google-authenticator
# Configure PAM
# /etc/pam.d/sshd
# Add before @include common-auth
auth required pam_google_authenticator.so
# Configure SSH
# /etc/ssh/sshd_config
ChallengeResponseAuthentication yes
AuthenticationMethods password,keyboard-interactive
# For user
su - username
google-authenticator
# TOTP configuration
# Add to user .google_authenticator
# Store secret securely
# Test
ssh username@server
# Enter password
# Enter 6-digit code

Q1264: How do you secure the boot process?

Section titled “Q1264: How do you secure the boot process?”

Answer:

Terminal window
# Enable UEFI secure boot
# Sign kernel
# 1. Generate keys
openssl req -new -x509 -newkey rsa:2048 -keyout MOK.priv -outform DER -out MOK.der -nodes -days 36500 -subj "/CN=My Secure Boot/"
# 2. Sign kernel (write to a new file; do not overwrite the input in place)
sbsign --key MOK.priv --cert MOK.der /boot/vmlinuz-$(uname -r) --output /boot/vmlinuz-$(uname -r).signed
# 3. Enroll key
mokutil --import MOK.der
# GRUB password protection
# /etc/grub.d/40_custom
set superusers="admin"
password_pbkdf2 admin grub.pbkdf2.sha512.10000.salt.hash
# Recompile GRUB
update-grub
# Disable USB boot
# BIOS settings or
# /etc/modprobe.d/blacklist.conf
install usb-storage /bin/true

Q1265: How do you implement file integrity monitoring?

Section titled “Q1265: How do you implement file integrity monitoring?”

Answer:

Terminal window
# Install AIDE
apt install aide
# Configure
# /etc/aide/aide.conf
# Database
database=file:/var/lib/aide/aide.db
database_out=file:/var/lib/aide/aide.db.new
# Rules
Fip = p+i+n+u+g+s+m+c+md5+sha256
Lnx = p+u+g+i+n+S
# Directories
/etc Fip
/bin Lnx
/sbin Lnx
/usr Lnx
# Initialize
aideinit
# Check
aide --check
aide --update
# Cron job
# /etc/cron.d/aide
0 5 * * * root /usr/bin/aide --check | mail -s "AIDE Report" admin@example.com

Q1266: How do you configure container orchestration?

Section titled “Q1266: How do you configure container orchestration?”

Answer:

Terminal window
# Kubernetes with kubeadm
kubeadm init --pod-network-cidr=10.244.0.0/16
# Install CNI (Flannel)
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Allow pods on master
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
# (on Kubernetes < 1.24 the taint is node-role.kubernetes.io/master-)
# Deploy application
# deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp
spec:
replicas: 3
selector:
matchLabels:
app: myapp
template:
metadata:
labels:
app: myapp
spec:
containers:
- name: myapp
image: myapp:latest
ports:
- containerPort: 8080
# Apply
kubectl apply -f deployment.yaml
# Scale
kubectl scale deployment myapp --replicas=5

Q1267: How do you configure Docker Swarm?

Section titled “Q1267: How do you configure Docker Swarm?”

Answer:

Terminal window
# Initialize swarm
docker swarm init --advertise-addr 192.168.1.10
# Join nodes
docker swarm join --token SWMTKN-1-xxx 192.168.1.10:2377
# Create service
docker service create \
--name myapp \
--replicas 3 \
--publish 8080:80 \
myapp:latest
# Scale service
docker service scale myapp=5
# Update service
docker service update \
--image myapp:v2 \
myapp
# Stack deploy
docker stack deploy -c docker-compose.yml myapp
# Visualize
docker node ls
docker service ls
docker service ps myapp

Q1268: How do you use Helm for Kubernetes deployments?

Section titled “Q1268: How do you use Helm for Kubernetes deployments?”

Answer:

Terminal window
# Install Helm
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Add repo
helm repo add stable https://charts.helm.sh/stable
helm repo update
# Create chart
helm create mychart
# Chart structure
# mychart/
# Chart.yaml
# values.yaml
# templates/
# deployment.yaml
# service.yaml
# Install
helm install myrelease mychart
helm install myrelease mychart --set replicaCount=3
# Upgrade
helm upgrade myrelease mychart
# Rollback
helm rollback myrelease 1
# Values override
helm install myrelease mychart -f values-prod.yaml

Q1269: How do you configure a service mesh with Istio?

Section titled “Q1269: How do you configure a service mesh with Istio?”

Answer:

Terminal window
# Install Istio
curl -L https://istio.io/downloadIstio | sh -
istioctl install --set profile=demo
# Enable injection
kubectl label namespace default istio-injection=enabled
# Deploy application
kubectl apply -f app.yaml
# Configure traffic
# VirtualService
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: myapp
spec:
hosts:
- myapp
http:
- route:
- destination:
host: myapp
subset: v1
weight: 80
- destination:
host: myapp
subset: v2
weight: 20
# Monitor
istioctl dashboard kiali
istioctl dashboard prometheus

Q1270: How do you configure GitOps with ArgoCD?

Section titled “Q1270: How do you configure GitOps with ArgoCD?”

Answer:

Terminal window
# Install ArgoCD
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# Get password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
# Access UI
kubectl port-forward svc/argocd-server -n argocd 8080:443
# Create application
# application.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: myapp
namespace: argocd
spec:
project: default
source:
repoURL: https://github.com/org/repo.git
targetRevision: HEAD
path: deploy
destination:
server: https://kubernetes.default.svc
namespace: myapp
syncPolicy:
automated:
prune: true
selfHeal: true
kubectl apply -f application.yaml

Q1271: How do you configure distributed storage?

Section titled “Q1271: How do you configure distributed storage?”

Answer:

Terminal window
# GlusterFS installation
apt install glusterfs-server
# Add trusted pool
gluster peer probe server2
gluster peer probe server3
gluster peer status
# Create volume
gluster volume create gv0 \
replica 3 \
server1:/brick1/server \
server2:/brick1/server \
server3:/brick1/server
# Start volume
gluster volume start gv0
# Mount
mount -t glusterfs server1:/gv0 /mnt/glusterfs
# Volume options
gluster volume set gv0 performance.cache-size 256MB
gluster volume set gv0 network.ping-timeout 10
# Rebalance
gluster volume rebalance gv0 start

Q1272: How do you configure multipath I/O?

Section titled “Q1272: How do you configure multipath I/O?”

Answer:

Terminal window
# Install
apt install multipath-tools
# Configure
# /etc/multipath.conf
defaults {
user_friendly_names yes
find_multipaths yes
}
devices {
device {
vendor "Dell"
product "MD36*"
path_grouping_policy multibus
path_checker tur
features "0"
hardware_handler "0"
}
}
# Start multipathd
systemctl start multipathd
# Commands
multipath -ll
multipath -v2
multipath -F
# Add path
multipath /dev/sda
# Get WWID
multipath -l /dev/sda

Q1273: How do you configure encrypted storage?

Section titled “Q1273: How do you configure encrypted storage?”

Answer:

Terminal window
# LUKS encryption
cryptsetup luksFormat /dev/sdb1
# Open container
cryptsetup luksOpen /dev/sdb1 encrypted_volume
# Create filesystem
mkfs.xfs /dev/mapper/encrypted_volume
# Mount
mount /dev/mapper/encrypted_volume /mnt/data
# Add key slot
cryptsetup luksAddKey /dev/sdb1
# Backup header
cryptsetup luksHeaderBackup /dev/sdb1 --header-backup-file header.img
# Auto unlock
# /etc/crypttab
encrypted_volume /dev/sdb1 none luks
# /etc/fstab
/dev/mapper/encrypted_volume /mnt/data xfs defaults 0 2

Q1274: How do you configure object storage?

Section titled “Q1274: How do you configure object storage?”

Answer:

Terminal window
# Install MinIO
wget https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
# Start MinIO
export MINIO_ROOT_USER=minioadmin
export MINIO_ROOT_PASSWORD=minioadmin
./minio server /data --console-address ":9001"
# Using mc (MinIO Client)
mc alias set myminio http://localhost:9000 minioadmin minioadmin
# Create bucket
mc mb myminio/mybucket
# Set policy
mc anonymous set download myminio/mybucket
# Replication
mc mirror myminio/mybucket myminio/mybucket-archive
# Use with S3 SDK
# AWS CLI
aws configure
aws s3 ls s3://mybucket/

Q1275: How do you configure snapshot and backup?

Section titled “Q1275: How do you configure snapshot and backup?”

Answer:

Terminal window
# LVM snapshots
lvcreate -L 10G -s -n snap_data /dev/vg_data/lv_data
# Mount snapshot
mount -o ro,nouuid /dev/vg_data/snap_data /mnt/snap
# Remove snapshot
lvremove /dev/vg_data/snap_data
# Btrfs snapshots
btrfs subvolume snapshot /data /data/snap-$(date +%Y%m%d)
# ZFS snapshots
zfs snapshot pool/data@snap-$(date +%Y%m%d)
zfs list -t snapshot
# Send/receive
btrfs send /data/snap1 | btrfs receive /backup/
zfs send pool/data@snap1 | zfs receive backup/pool/data
# Incremental
btrfs send -p /data/snap1 /data/snap2 | btrfs receive /backup/
zfs send -i pool/data@snap1 pool/data@snap2 | zfs receive backup/pool/data

Q1276: How do you use BPF for performance?

Section titled “Q1276: How do you use BPF for performance?”

Answer:

Terminal window
# Install bpftrace
apt install bpftrace
# List probes
bpftrace -l
# Trace file opens
bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf("%s %s\n", comm, str(args->filename)); }'
# Trace TCP connect
bpftrace -e 'tracepoint:syscalls:sys_enter_connect { printf("Connect: %s\n", comm); }'
# Custom program
# /root/bpf/hello.bt
#!/usr/bin/bpftrace
BEGIN
{
printf("Tracing... Hit Ctrl-C to end.\n");
}
tracepoint:syscalls:sys_enter_read
{
@reads[comm] = count();
}
# Using perf
perf record -g ./program
perf report
# Using BCC tools
# /usr/share/bcc/tools/
/usr/share/bcc/tools/ext4slower 1
/usr/share/bcc/tools/tcpconnect

Q1277: How do you profile application performance?

Section titled “Q1277: How do you profile application performance?”

Answer:

Terminal window
# Using gprof
gcc -pg -g program.c -o program
./program
gprof program gmon.out > analysis.txt
# Using valgrind
valgrind --tool=callgrind ./program
# View with kcachegrind
kcachegrind callgrind.out.*
# Using perf
perf record -g ./program
perf report
perf annotate
# Using strace
strace -c ./program
strace -T -tt -e trace=write ./program
# Using flamegraph
perf record -F 99 -g ./program
perf script | stackcollapse-perf.pl > out.folded
flamegraph.pl out.folded > flamegraph.svg

Q1278: How do you tune database performance?

Section titled “Q1278: How do you tune database performance?”

Answer:

Terminal window
# PostgreSQL tuning
# postgresql.conf
shared_buffers = 256MB
effective_cache_size = 768MB
maintenance_work_mem = 64MB
checkpoint_completion_target = 0.9
wal_buffers = 16MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 4MB
min_wal_size = 1GB
max_wal_size = 4GB
# MySQL tuning
# my.cnf
innodb_buffer_pool_size = 1G
innodb_log_file_size = 256M
innodb_flush_log_at_trx_commit = 2
innodb_flush_method = O_DIRECT
key_buffer_size = 256M
query_cache_size = 0
tmp_table_size = 64M
max_connections = 200
# Analyze queries
EXPLAIN ANALYZE SELECT * FROM table WHERE condition;
SHOW PROCESSLIST;

Q1279: How do you optimize web server performance?

Section titled “Q1279: How do you optimize web server performance?”

Answer:

Terminal window
# Nginx optimization
worker_processes auto;
worker_rlimit_nofile 65535;
events {
worker_connections 65535;
use epoll;
multi_accept on;
}
http {
# Buffer sizes
client_body_buffer_size 10K;
client_header_buffer_size 1k;
client_max_body_size 8m;
large_client_header_buffers 4 32k;
# Caching
open_file_cache max=10000 inactive=30s;
open_file_cache_valid 60s;
open_file_cache_min_uses 2;
open_file_cache_errors on;
# Gzip
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_types text/plain text/css text/xml application/json application/javascript;
}

Q1280: How do you implement caching strategies?

Section titled “Q1280: How do you implement caching strategies?”

Answer:

Terminal window
# Varnish VCL
vcl 4.1;
backend default {
.host = "127.0.0.1";
.port = "8080";
}
sub vcl_recv {
# Don't cache
if (req.method == "POST" || req.method == "PUT") {
return (pass);
}
# Strip cookies for static
if (req.url ~ "\.(css|js|jpg|png|gif|ico|svg)$") {
unset req.http.Cookie;
}
}
sub vcl_backend_response {
if (beresp.http.Set-Cookie) {
return (pass);
}
# Cache static
if (bereq.url ~ "\.(css|js|jpg|png)$") {
set beresp.ttl = 24h;
}
}
# Redis caching
# /etc/redis/redis.conf
maxmemory 2gb
maxmemory-policy allkeys-lru
save ""
appendonly yes

Q1281: How do you build machine images with Packer?

Section titled “Q1281: How do you build machine images with Packer?”

Answer:

{
"builders": [{
"type": "amazon-ebs",
"region": "us-east-1",
"source_ami": "ami-0c55b159cbfafe1f0",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "myapp-{{timestamp}}",
"run_tags": {
"Name": "packer-builder"
}
}],
"provisioners": [{
"type": "shell",
"execute_command": "sudo {{.Path}}",
"script": "provision.sh"
}, {
"type": "ansible",
"playbook_file": "playbook.yml"
}],
"post-processors": [{
"type": "vagrant",
"keep_input_artifact": true
}]
}

Q1282: How do you use Vagrant with provisioning?

Section titled “Q1282: How do you use Vagrant with provisioning?”

Answer:

Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/jammy64"
# Shell provisioner
config.vm.provision "shell", inline: "apt-get update"
# File provisioner
config.vm.provision "file", source: "config/", destination: "/tmp/config"
# Ansible provisioner
config.vm.provision "ansible" do |ansible|
ansible.playbook = "playbook.yml"
ansible.extra_vars = {
deploy_user: "vagrant"
}
end
# Docker provisioner
config.vm.provision "docker" do |d|
d.pull_images "ubuntu:22.04"
d.run "nginx", ports: ["80:80"]
end
# Puppet provisioner
config.vm.provision "puppet" do |p|
p.manifest_file = "site.pp"
p.module_path = "modules"
end
end

Q1283: How do you create custom AMIs?

Section titled “Q1283: How do you create custom AMIs?”

Answer:

Terminal window
# Using Packer
packer build template.json
# Manual steps
# 1. Launch instance
aws ec2 run-instances \
--image-id ami-0c55b159cbfafe1f0 \
--instance-type t3.micro \
--key-name mykey
# 2. Customize
ssh -i key.pem ubuntu@instance
sudo apt update
sudo apt install nginx docker.io
# 3. Create image
aws ec2 create-image \
--instance-id i-1234567890abcdef0 \
--name "Custom-Nginx-$(date +%Y%m%d)" \
--description "Custom AMI with nginx" \
--no-reboot
# 4. Share AMI
aws ec2 modify-image-attribute \
--image-id ami-12345678 \
--launch-permission "Add=[{UserId=123456789012}]"

Q1284: How do you configure infrastructure testing?

Section titled “Q1284: How do you configure infrastructure testing?”

Answer:

Terminal window
# Using Inspec
# inspec.yml
name: linux-baseline
title: Linux Baseline
version: 1.0.0
# controls/apache.rb
control 'apache-01' do
impact 1.0
title 'Apache should be installed'
desc 'Apache is required for web serving'
describe package('apache2') do
it { should be_installed }
end
describe service('apache2') do
it { should be_installed }
it { should be_running }
end
describe port(80) do
it { should be_listening }
end
end
# Run
inspec exec profile/
inspec exec profile/ --attrs attributes.yml
inspec check profile/

Q1285: How do you implement GitOps workflow?

Section titled “Q1285: How do you implement GitOps workflow?”

Answer:

Terminal window
# 1. Store infrastructure in Git
git init infra-repo
cd infra-repo
# 2. Directory structure
# .
# ├── applications/
# │ └── myapp/
# │ ├── deployment.yaml
# │ └── service.yaml
# └── infrastructure/
# ├── terraform/
# └── ansible/
# 3. Deploy with ArgoCD
kubectl apply -f application.yaml
# 4. CI pipeline
# .gitlab-ci.yml
deploy:
stage: deploy
script:
- git add .
- git commit -m "Update myapp to $CI_COMMIT_SHA"
- git push
only:
- main
# 5. Drift detection
# ArgoCD will detect drift and sync automatically
argocd app sync myapp
argocd app diff myapp

Q1286: How do you configure network bonding modes?

Section titled “Q1286: How do you configure network bonding modes?”

Answer:

/etc/modprobe.d/bonding.conf
# Mode 0: Round-robin
alias bond0 bonding
options bond0 mode=0 miimon=100
# Mode 1: Active-backup
options bond0 mode=1 miimon=100 primary=eth0
# Mode 4: 802.3ad (LACP)
options bond0 mode=4 miimon=100 lacp_rate=1
# Mode 5: Balance-tlb
options bond0 mode=5 miimon=100
# Interface configuration
# /etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
BONDING_OPTS="mode=5 miimon=100"
IPADDR=192.168.1.10
NETMASK=255.255.255.0
ONBOOT=yes
# Slave interfaces
# /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
MASTER=bond0
SLAVE=yes
ONBOOT=yes

Q1287: How do you configure VLANs?

Section titled “Q1287: How do you configure VLANs?”

Answer:

Terminal window
# Create VLAN interface
ip link add link eth0 name eth0.100 type vlan id 100
ip link set eth0.100 up
ip addr add 192.168.100.1/24 dev eth0.100
# Using vconfig
vconfig add eth0 100
ifconfig eth0.100 192.168.100.1 netmask 255.255.255.0 up
# Persistent configuration
# /etc/network/interfaces (Debian)
auto eth0.100
iface eth0.100 inet static
address 192.168.100.1
netmask 255.255.255.0
vlan-raw-device eth0
# RHEL
# /etc/sysconfig/network-scripts/ifcfg-eth0.100
VLAN=yes
DEVICE=eth0.100
PHYSDEV=eth0
VLAN_ID=100
IPADDR=192.168.100.1
NETMASK=255.255.255.0

Q1288: How do you configure network bridging?

Section titled “Q1288: How do you configure network bridging?”

Answer:

Terminal window
# Create bridge
brctl addbr br0
ip addr add 192.168.1.1/24 dev br0
ip link set br0 up
# Add interfaces
brctl addif br0 eth0
brctl addif br0 eth1
# Persistent configuration
# /etc/network/interfaces
auto br0
iface br0 inet static
address 192.168.1.1
netmask 255.255.255.0
bridge_ports eth0 eth1
bridge_stp off
bridge_fd 0
bridge_maxwait 0
# Using iproute2
ip link add name br0 type bridge
ip link set dev eth0 master br0
ip link set dev eth1 master br0
# View
brctl show
ip link show type bridge

Q1289: How do you configure tunnel interfaces?

Section titled “Q1289: How do you configure tunnel interfaces?”

Answer:

Terminal window
# GRE tunnel
ip tunnel add gre0 mode gre remote 203.0.113.2 local 203.0.113.1
ip link set gre0 up
ip addr add 10.0.0.1/30 dev gre0
# IPIP tunnel
ip tunnel add ipip0 mode ipip remote 203.0.113.2 local 203.0.113.1
ip link set ipip0 up
ip addr add 10.0.0.1/30 dev ipip0
# WireGuard
apt install wireguard
# wg0.conf
[Interface]
PrivateKey = <server-private-key>
Address = 10.0.0.1/24
ListenPort = 51820
[Peer]
PublicKey = <client-public-key>
AllowedIPs = 10.0.0.2/32
wg-quick up wg0

Q1290: How do you configure high availability with Keepalived?

Section titled “Q1290: How do you configure high availability with Keepalived?”

Answer:

/etc/keepalived/keepalived.conf
# Install keepalived
apt install keepalived
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
virtual_ipaddress {
192.168.1.100/24 dev eth0
}
track_interface {
eth0 weight -20
}
authentication {
auth_type AH
auth_pass secret123
}
notify_backup "/usr/local/bin/backup.sh"
notify_fault "/usr/local/bin/fault.sh"
notify_master "/usr/local/bin/master.sh"
}
# Backup configuration
# priority 90
# state BACKUP

Q1291: How do you configure KVM live migration?

Section titled “Q1291: How do you configure KVM live migration?”

Answer:

Terminal window
# Enable migration on source
virsh migrate --live --persistent --undefinesource \
vmname qemu+ssh://dest-host/system
# Using migrate command
virsh migrate --live vmname dest-host
# With compression
virsh migrate --live --compressed --desturi qemu+tcp://dest-host/system vmname
# With TLS
virsh migrate --live --tls dest-host vmname
# Configure for migration
# /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
# Enable in /etc/default/libvirt-bin
# LIBVIRTD_ARGS="--listen"
# Verify migration
virsh dominfo vmname
virsh migrate-getspeed vmname

Q1292: How do you configure nested virtualization?

Section titled “Q1292: How do you configure nested virtualization?”

Answer:

Terminal window
# Enable nesting (Intel)
# Add to kernel parameters
kvm-intel.nested=1
# Check
cat /sys/module/kvm_intel/parameters/nested
# Y = enabled
# Create nested VM
virt-install \
--name nested-vm \
--ram 2048 \
--vcpus 2 \
--disk path=/var/lib/libvirt/images/nested.qcow2 \
--os-variant ubuntu22.04 \
--graphics vnc \
--cpu host-passthrough
# Verify
# Inside nested VM
lscpu | grep Virtualization
# Should show VT-x or AMD-V
# Nested Docker
# On nested VM
apt install docker.io
# Should work
docker run hello-world

Q1293: How do you configure PCI passthrough?

Section titled “Q1293: How do you configure PCI passthrough?”

Answer:

/etc/default/grub
# Enable IOMMU
GRUB_CMDLINE_LINUX="intel_iommu=on"
# or
GRUB_CMDLINE_LINUX="amd_iommu=on"
# Update GRUB
update-grub
reboot
# Verify
dmesg | grep -e DMAR -e IOMMU
# Bind device to vfio
lspci -nnk -d 10de: # NVIDIA
# 10de:1b80:01:00.0
# vfio-pci.ids
echo "options vfio-pci ids=10de:1b80" > /etc/modprobe.d/vfio.conf
# Unbind from driver
virsh nodedev-detach pci_0000_01_00_0
# Add to VM
# virsh edit vmname
<hostdev mode='subsystem' type='pci' managed='yes'>
<source>
<address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
</source>
</hostdev>

Q1294: How do you configure GPU virtualization?

Section titled “Q1294: How do you configure GPU virtualization?”

Answer:

Terminal window
# NVIDIA vGPU
# Install nvidia-vgpud and nvidia-vgpu-mgr
apt install nvidia-vgpu-mgr
# Configure vGPU
# /etc/nvidia-vgpu-mgr/vgpuConfig.txt
vgpuConfig=1
guestVmTimeout=60
NVMgrid
# Create vGPU
virsh nodedev-create vgpu0
# Or using NVIDIA GRID
# Install NVIDIA driver in VM
./NVIDIA-Linux-x86_64-grid.run
# Check
nvidia-smi
# Should show vGPU
# For AMD GPU passthrough
# See PCI passthrough

Q1295: How do you configure storage pools?

Section titled “Q1295: How do you configure storage pools?”

Answer:

Terminal window
# Create directory pool
virsh pool-define-as default dir --target /var/lib/libvirt/images
virsh pool-build default
virsh pool-start default
virsh pool-autostart default
# Create LVM pool
virsh pool-define-as vgpool logical \
--source-name vg_libvirt \
--target /dev/vg_libvirt
# Create NFS pool
virsh pool-define-as nfspool netfs \
--source-format nfs \
--source-host 192.168.1.10 \
--source-path /share \
--target /mnt/nfs
# View pools
virsh pool-list
virsh pool-info default
# Create volume
virsh vol-create-as default vm1.qcow2 10G --allocation 10G --format qcow2
virsh vol-create-as default vm2.qcow2 20G

Q1296: How do you configure custom metrics?

Section titled “Q1296: How do you configure custom metrics?”

Answer:

/usr/local/bin/custom_metrics
# Using Prometheus node_exporter
# Custom collector script
#!/bin/bash
while true; do
echo "custom_app_requests_total $(date +%s)"
echo "custom_app_active_connections $(netstat -an | grep :8080 | wc -l)"
sleep 10
done
# NOTE: node_exporter does not accept pushed metrics; expose values like these
# via the textfile collector (shown below) or a Pushgateway instead.
# Using textfile collector
cat > /usr/local/bin/app_metrics.sh << 'EOF'
#!/bin/bash
METRICS_DIR="/var/lib/node_exporter/textfile_collector"
# Application metrics
APP_REQUESTS=$(curl -s localhost:8080/metrics | grep requests | awk '{print $2}')
APP_LATENCY=$(curl -s localhost:8080/metrics | grep latency | awk '{print $2}')
cat > ${METRICS_DIR}/app.prom << EOM
app_requests_total ${APP_REQUESTS}
app_latency_seconds ${APP_LATENCY}
EOM
EOF
chmod +x /usr/local/bin/app_metrics.sh
crontab -e
# * * * * * /usr/local/bin/app_metrics.sh

Answer:

/etc/prometheus/rules/alerts.yml
# Prometheus alerting rules
groups:
- name: linux_alerts
interval: 30s
rules:
- alert: HighCPUUsage
expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
for: 5m
labels:
severity: warning
annotations:
summary: "High CPU usage on {{ $labels.instance }}"
- alert: HighMemoryUsage
expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes * 100 > 90
for: 5m
labels:
severity: critical
- alert: DiskSpaceLow
expr: (node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"}) < 0.1
for: 10m
labels:
severity: warning

Q1298: How do you configure distributed tracing?

Section titled “Q1298: How do you configure distributed tracing?”

Answer:

Terminal window
# Install Jaeger
docker run -d --name jaeger \
-e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \
-p 6831:6831/udp \
-p 16686:16686 \
jaegertracing/all-in-one:1.35
# Using OpenTelemetry
# Python example
from opentelemetry import trace
from opentelemetry.exporter.jaeger.thrift import JaegerExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
trace.set_tracer_provider(TracerProvider())
jaeger_exporter = JaegerExporter(
agent_host_name="localhost",
agent_port=6831,
)
trace.get_tracer_provider().add_span_processor(
BatchSpanProcessor(jaeger_exporter)
)
tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("hello-span"):
print("Hello, World!")

Q1299: How do you configure log aggregation?

Section titled “Q1299: How do you configure log aggregation?”

Answer:

/etc/filebeat/filebeat.yml
# Filebeat configuration
filebeat.inputs:
- type: log
paths:
- /var/log/syslog
- /var/log/auth.log
fields:
type: syslog
fields_under_root: true
- type: log
paths:
- /var/log/nginx/*.log
fields:
type: nginx
fields_under_root: true
output.logstash:
hosts: ["logstash:5044"]
processors:
- add_host_metadata:
fields_under_root: true
- add_docker_metadata: ~
# Logstash pipeline
# /etc/logstash/conf.d/01-input.conf
input {
beats {
port => 5044
}
}
# /etc/logstash/conf.d/02-filter.conf
filter {
if [type] == "syslog" {
grok {
match => { "message" => "%{SYSLOGTIMESTAMP:timestamp} %{SYSLOGHOST:hostname} %{DATA:program}(?:\[%{POSINT:pid}\])?: %{GREEDYDATA:message}" }
}
date {
match => [ "timestamp", "MMM dd HH:mm:ss" ]
}
}
}

Q1300: How do you configure Grafana dashboards?

Section titled “Q1300: How do you configure Grafana dashboards?”

Answer:

{
"dashboard": {
"title": "System Overview",
"panels": [
{
"id": 1,
"title": "CPU Usage",
"type": "graph",
"gridPos": {"x": 0, "y": 0, "w": 12, "h": 8},
"targets": [
{
"expr": "100 - (avg by (mode) (irate(node_cpu_seconds_total{mode='idle'}[5m])) * 100)",
"legendFormat": "{{mode}}"
}
]
},
{
"id": 2,
"title": "Memory Usage",
"type": "graph",
"gridPos": {"x": 12, "y": 0, "w": 12, "h": 8},
"targets": [
{
"expr": "node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes",
"legendFormat": "Used"
},
{
"expr": "node_memory_MemAvailable_bytes",
"legendFormat": "Available"
}
]
},
{
"id": 3,
"title": "Network Traffic",
"type": "graph",
"gridPos": {"x": 0, "y": 8, "w": 12, "h": 8},
"targets": [
{
"expr": "irate(node_network_receive_bytes_total{device!='lo'}[5m])",
"legendFormat": "{{device}} RX"
},
{
"expr": "irate(node_network_transmit_bytes_total{device!='lo'}[5m])",
"legendFormat": "{{device}} TX"
}
]
}
]
}
}

Q1301: How do you troubleshoot network connectivity issues?

Section titled “Q1301: How do you troubleshoot network connectivity issues?”

Answer:

Terminal window
# Check interface status
ip link show
ip addr show
ethtool eth0
mii-tool eth0
# Check routing
ip route show
ip route get 8.8.8.8
ip neighbor show
# Check DNS
dig +short example.com
getent hosts example.com
cat /etc/resolv.conf
# Connectivity tests
ping -c 4 8.8.8.8
traceroute -I 8.8.8.8
mtr -n 8.8.8.8
# Port tests
nc -zv host port
telnet host port
ss -tulpn | grep :port
# Packet capture
tcpdump -i eth0 host 192.168.1.1
tcpdump -i eth0 port 80
tcpdump -i eth0 -w capture.pcap

Q1302: How do you troubleshoot service failures?

Section titled “Q1302: How do you troubleshoot service failures?”

Answer:

Terminal window
# Service status
systemctl status service-name
systemctl list-units --failed
# Service logs
journalctl -u service-name -n 50
journalctl -u service-name --since "1 hour ago"
journalctl -xe
# Process info
ps auxf | grep service-name
lsof -p $(pgrep -f service-name)
# Configuration test
service-name -t
nginx -t
apache2ctl configtest
# Dependencies
systemctl list-dependencies service-name
# Resource limits
cat /proc/$(pgrep -f service-name)/limits
# Strace
strace -f -p $(pgrep -f service-name)
strace -c service-name
# Cgroups
systemd-cgls | grep service-name
systemctl show service-name

Q1303: How do you debug performance issues?

Section titled “Q1303: How do you debug performance issues?”

Answer:

Terminal window
# CPU analysis
top -c
htop
mpstat -P ALL 1
pidstat -p <pid> 1
# Memory analysis
free -h
vmstat 1
pmap -x <pid>
cat /proc/<pid>/status
# I/O analysis
iostat -xz 1
iotop
pidstat -d 1
# Network analysis
nethogs
iftop
ss -s
# System resources
sar -A 1 5
atop
# Process analysis
perf top
flamegraph.pl < perf.data
# Application profiling
python -m cProfile -o profile.out app.py
pyprof2calltree -i profile.out

Q1304: How do you troubleshoot disk issues?

Section titled “Q1304: How do you troubleshoot disk issues?”

Answer:

Terminal window
# Disk usage
df -h
df -i
du -sh /*
# Find large files
find / -type f -size +100M -exec ls -lh {} \; 2>/dev/null | sort -k5 -h
# I/O stats
iostat -xz 1
sar -d 1
# Check filesystem
fsck -n /dev/sda1
xfs_repair -n /dev/sda1
# Check SMART
smartctl -a /dev/sda
smartctl -H /dev/sda
# Mount options
mount | grep sda1
cat /proc/mounts
# Lsof for deleted files
lsof +L1
ls -l /proc/*/fd/* | grep deleted

Q1305: How do you debug authentication issues?

Section titled “Q1305: How do you debug authentication issues?”

Answer:

Terminal window
# Check SSH logs
tail -f /var/log/auth.log
journalctl -u sshd -f
# Check PAM
tail -f /var/log/syslog | grep -i pam
# Test authentication
# SSH with debug
ssh -vvv user@host
# Test PAM
pamtester login username authenticate
# Check sudo
sudo -l
tail -f /var/log/auth.log | grep sudo
# LDAP issues
ldapsearch -x -D "cn=admin,dc=example,dc=com" -W
getent passwd username
id username
# Kerberos
kinit -f user@REALM
klist

Q1306: How do you configure IP masquerading?

Section titled “Q1306: How do you configure IP masquerading?”

Answer:

Terminal window
# Enable IP forwarding
sysctl -w net.ipv4.ip_forward=1
# Add to /etc/sysctl.conf
net.ipv4.ip_forward=1
# NAT with iptables
# Outbound NAT
iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
# Or specific IP
iptables -t nat -A POSTROUTING -o eth0 -j SNAT --to-source 203.0.113.10
# Inbound port forwarding
iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 80 -j DNAT --to-destination 192.168.1.10:8080
# Save rules
iptables-save > /etc/iptables/rules.v4
# or
service iptables save

Q1307: How do you configure packet filtering?

Section titled “Q1307: How do you configure packet filtering?”

Answer:

Terminal window
# Basic filter rules
# Allow loopback
iptables -A INPUT -i lo -j ACCEPT
# Allow established
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# Allow SSH
iptables -A INPUT -p tcp --dport 22 -j ACCEPT
# Allow HTTP/HTTPS
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
# Drop everything else
iptables -A INPUT -j DROP
# Forward rules
iptables -A FORWARD -i eth0 -o eth1 -j ACCEPT
iptables -A FORWARD -i eth1 -o eth0 -m state --state ESTABLISHED,RELATED -j ACCEPT
# Rate limiting
iptables -A INPUT -p tcp --dport 22 -m state --state NEW -m recent --set
iptables -A INPUT -p tcp --dport 22 -m state --state NEW -m recent --update --seconds 60 --hitcount 4 -j DROP

Q1308: How do you configure DNS over TLS?

Section titled “Q1308: How do you configure DNS over TLS?”

Answer:

Terminal window
# Install stubby
apt install stubby
# Configure
# /etc/stubby/stubby.yml
resolution_type: GETDNS_RESOLUTION_TYPE_STUB
listen_addresses:
- 127.0.0.1@53
upstream_recursive_servers:
- address_data: 1.1.1.1
tls_auth_name: "cloudflare-dns.com"
- address_data: 8.8.8.8
tls_auth_name: "dns.google"
# Enable systemd-resolved
systemctl start stubby
systemctl enable stubby
# Configure systemd-resolved
# /etc/systemd/resolved.conf
[Resolve]
DNS=127.0.0.1
DNSOverTLS=yes
# Test
dig @127.0.0.1 example.com +tls

Q1309: How do you configure a VPN server?

Section titled “Q1309: How do you configure a VPN server?”

Answer:

Terminal window
# OpenVPN
apt install openvpn easy-rsa
# Generate keys
cd /etc/openvpn/easy-rsa
./easyrsa init-pki
./easyrsa build-ca
./easyrsa build-server-full server nopass
./easyrsa build-client-full client1 nopass
# Server config
# /etc/openvpn/server.conf
port 1194
proto udp
dev tun
ca ca.crt
cert server.crt
key server.key
dh dh.pem
server 10.8.0.0 255.255.255.0
push "redirect-gateway def1 bypass-dhcp"
push "dhcp-option DNS 8.8.8.8"
keepalive 10 120
cipher AES-256-GCM
auth SHA256
persist-key
persist-tun
status openvpn-status.log
# Client config
client
dev tun
proto udp
remote vpn.example.com 1194
resolv-retry infinite
nobind
persist-key
persist-tun
remote-cert-tls server
cipher AES-256-GCM
auth SHA256

Q1310: How do you configure policy routing?

Section titled “Q1310: How do you configure policy routing?”

Answer:

Terminal window
# Additional routing tables
echo "100 fast" >> /etc/iproute2/rt_tables
echo "200 slow" >> /etc/iproute2/rt_tables
# Add routes to tables
ip route add 192.168.1.0/24 dev eth1 src 192.168.2.1 table fast
ip route add default via 192.168.1.1 table fast
# Policy routing rules
ip rule add from 192.168.2.10 table fast
ip rule add to 192.168.1.0/24 table fast
ip rule add fwmark 1 table slow
# Mark packets
iptables -t mangle -A PREROUTING -s 192.168.2.10 -j MARK --set-mark 1
iptables -t mangle -A OUTPUT -s 192.168.2.10 -j MARK --set-mark 1
# Persistent
# /etc/network/interfaces
post-up ip rule add from 192.168.2.10 table fast
post-down ip rule del from 192.168.2.10 table fast

Q1311: How do you configure fencing (STONITH)?

Section titled “Q1311: How do you configure fencing (STONITH)?”

Answer:

Terminal window
# Install fence-agents
yum install fence-agents-all
# Configure STONITH
# /etc/cluster/cluster.conf
<cluster name="mycluster" config_version="1">
<fence_daemon post_fail_delay="0" post_join_delay="3"/>
<fence_method name="method" device="name">
<fence_level level="1" id="level1">
<device name="device" action="on" port="node1"/>
</fence_level>
</fence_method>
<nodes>
<node name="node1" nodeid="1" fences="method"/>
<node name="node2" nodeid="2" fences="method"/>
</nodes>
</cluster>
# IPMI fence
stonith -a ipmi -t "ipmilan=user:pass@node1" -v
# Test fence
pcs stonith fence node1
pcs stonith history node1

Q1312: How do you configure resource constraints?

Section titled “Q1312: How do you configure resource constraints?”

Answer:

Terminal window
# Colocation constraint
pcs constraint colocation add WebService VirtualIP INFINITY
# Order constraint
pcs constraint order VirtualIP then WebService
# Location constraint (preference)
pcs constraint location WebService prefers node1=50
# Location constraint (avoid)
pcs constraint location WebService avoids node2
# Resource stickiness
pcs resource meta WebService resource-stickiness=100
# Migration threshold
pcs resource meta WebService migration-threshold=3
# Priority
pcs resource priority WebService=10
# Utilization
pcs node utilization node1 cpu=4 memory=16
pcs resource utilization WebService cpu=2 memory=4

Q1313: How do you configure load balancing?

Section titled “Q1313: How do you configure load balancing?”

Answer:

Terminal window
# Install IPVS admin tool
apt install ipvsadm
# Create virtual service (round-robin)
ipvsadm -A -t 192.168.1.100:80 -s rr
# Add real servers (NAT/masquerade mode)
ipvsadm -a -t 192.168.1.100:80 -r 192.168.1.10:80 -m
ipvsadm -a -t 192.168.1.100:80 -r 192.168.1.11:80 -m
# List rules and statistics
ipvsadm -l -n
ipvsadm -l -n --stats
# Persistent (sticky) connections for 300 seconds
ipvsadm -A -t 192.168.1.100:80 -s rr -p 300
# Edit real server weight
ipvsadm -e -t 192.168.1.100:80 -r 192.168.1.10:80 -m -w 2

Q1314: How do you configure health checking?

Section titled “Q1314: How do you configure health checking?”

Answer:

Terminal window
# HAProxy health check
# /etc/haproxy/haproxy.cfg
backend servers
balance roundrobin
option httpchk GET /health
http-check expect status 200
server s1 192.168.1.10:8080 check inter 2000 rise 2 fall 3
server s2 192.168.1.11:8080 check inter 2000 rise 2 fall 3 slowstart 10s
# Custom health check script
# healthcheck.sh
#!/bin/bash
curl -sf http://localhost:8080/health || exit 1
# Passive health checks in nginx (max_fails/fail_timeout defaults on upstream servers)
# nginx.conf
upstream backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
server 192.168.1.12:8080 backup;
}
# Keepalived health check
vrrp_script check_service {
script "/usr/local/bin/healthcheck.sh"
interval 5
weight -10
}

Q1315: How do you implement split-brain prevention?

Section titled “Q1315: How do you implement split-brain prevention?”

Answer:

/etc/corosync/corosync.conf
# Quorum configuration
quorum {
provider: corosync_votequorum
expected_votes: 2
two_node: 1
}
# Ping node for quorum
quorum {
provider: corosync_votequorum
expected_votes: 3
wait_for_all: 1
}
# Tie-breaker
totem {
interface {
ringnumber: 0
transport: udpu
}
}
nodelist {
node {
ring0_addr: node1
nodeid: 1
}
node {
ring0_addr: node2
nodeid: 2
}
node {
ring0_addr: node3
nodeid: 3
}
}
# Split-brain recovery script
# /usr/local/bin/split-brain-recovery.sh
pcs cluster stop node1
pcs cluster start node2

Q1316: How do you write efficient scripts?

Section titled “Q1316: How do you write efficient scripts?”

Answer:

#!/bin/bash
set -euo pipefail
IFS=$'\n\t'
# Use arrays for loops
files=( /var/log/*.log )
for file in "${files[@]}"; do
[[ -f "$file" ]] || continue
process "$file"
done
# Use functions with local variables
process() {
local file="$1"
local content
content=$(<"$file")
echo "${content:0:100}"
}
# Avoid subshells in loops
while IFS= read -r line; do
((count++))
done < <(grep -r "pattern" .)
# Use coprocesses
coproc BC { bc -l; }
echo "scale=10; 355/113" >&${BC[1]}
read -r pi <&${BC[0]}
# Use process substitution
diff <(sort file1) <(sort file2)
# Parallel processing
parallel -j 4 process {} ::: *.log

Q1317: How do you process JSON in shell scripts?

Section titled “Q1317: How do you process JSON in shell scripts?”

Answer:

Terminal window
# Using jq
# Parse JSON
cat data.json | jq '.name'
cat data.json | jq '.items[].value'
# Filter
cat data.json | jq '.items[] | select(.id > 5)'
# Transform
cat data.json | jq '{name: .name, computed: .value * 2}'
# Arrays
cat data.json | jq '.items | length'
cat data.json | jq '.items | map(.name)'
# Create JSON
jq -n '{name: "test", items: [1,2,3]}'
# Modify
jq '.name = "new"' data.json
jq '.items += ["new"]' data.json
# Input from variable
data='{"name":"test"}'
echo "$data" | jq '.name'

Q1318: How do you use Python for system scripting?

Section titled “Q1318: How do you use Python for system scripting?”

Answer:

#!/usr/bin/env python3
import sys
import json
import subprocess
import logging
logging.basicConfig(level=logging.INFO)
def run_command(cmd):
result = subprocess.run(
cmd,
shell=True,
capture_output=True,
text=True
)
return result.stdout.strip()
def parse_json_file(filepath):
with open(filepath) as f:
return json.load(f)
def main():
# Get system info
cpu = run_command("nproc")
mem = run_command("free -h | awk '/^Mem:/ {print $2}'")
# Process data
data = parse_json_file("config.json")
# Output
result = {
"cpu_cores": cpu,
"memory": mem,
"config": data
}
print(json.dumps(result, indent=2))
return 0
if __name__ == "__main__":
sys.exit(main())

Q1319: How do you use regular expressions in scripts?

Section titled “Q1319: How do you use regular expressions in scripts?”

Answer:

Terminal window
# Extract IP addresses
grep -oE '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' file.txt
# Validate email
email="user@example.com"
if [[ "$email" =~ ^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]]; then
echo "Valid"
fi
# Extract dates
grep -oE '[0-9]{4}-[0-9]{2}-[0-9]{2}' file.txt
# Replace with sed
sed -E 's/([0-9]{4})([0-9]{2})([0-9]{2})/\1-\2-\3/g' file.txt
# Using awk
awk '/pattern/ {print $1, $2}' file.txt
awk 'match($0, /[0-9]+/) {print substr($0, RSTART, RLENGTH)}' file.txt
# Complex parsing
awk '
BEGIN { FPAT = "([^,]+)|(\"[^\"]+\")" }
{
for (i=1; i<=NF; i++) {
gsub(/^"|"$/, "", $i)
print i ": " $i
}
}
' data.csv

Q1320: How do you handle errors in scripts?

Section titled “Q1320: How do you handle errors in scripts?”

Answer:

#!/bin/bash
set -euo pipefail
# Exit handlers
cleanup() {
local exit_code=$?
if [[ $exit_code -ne 0 ]]; then
echo "Script failed with exit code $exit_code" >&2
fi
}
trap cleanup EXIT
# Error function
error() {
echo "ERROR: $*" >&2
return 1
}
# Validate input
validate_input() {
local file="$1"
[[ -f "$file" ]] || error "File not found: $file"
[[ -r "$file" ]] || error "File not readable: $file"
}
# Retry logic
retry() {
local max_attempts=3
local delay=5
local attempt=1
while [[ $attempt -le $max_attempts ]]; do
if "$@"; then
return 0
fi
echo "Attempt $attempt failed, retrying in $delay seconds..."
sleep $delay
((attempt++))
done
error "Failed after $max_attempts attempts"
}
# Test
validate_input "/etc/passwd" || exit 1
retry curl -sf http://example.com

Q1321: How do you implement audit logging?

Section titled “Q1321: How do you implement audit logging?”

Answer:

/etc/audit/audit.rules
# Configure auditd
# Watch files
-w /etc/passwd -p wa -k passwd_changes
-w /etc/shadow -p wa -k shadow_changes
-w /etc/sudoers -p wa -k sudoers_changes
-w /etc/ssh/sshd_config -p wa -k sshd_config
# Watch directories
-w /etc/httpd/conf -p wa -k httpd_conf
-w /etc/nginx/nginx.conf -p wa -k nginx_conf
# System calls
-a always,exit -F arch=b64 -S execve -F path=/usr/bin/wget -k network_download
-a always,exit -F arch=b64 -S execve -F path=/usr/bin/curl -k network_download
-a always,exit -F arch=b64 -S setuid -k privilege_escalation
# Commands
-w /usr/bin/sudo -p x -k sudo_commands
-w /usr/bin/su -p x -k su_commands
# View logs
ausearch -k passwd_changes
aureport -f
aureport -u

Q1322: How do you implement access control?

Section titled “Q1322: How do you implement access control?”

Answer:

/etc/pam.d/common-session
# Configure PAM
session required pam_unix.so
session optional pam_mkhomedir.so skel=/etc/skel umask=077
# /etc/security/limits.conf
# Resource limits
* soft nofile 65535
* hard nofile 65535
* soft nproc 4096
* hard nproc 8192
# Time restrictions
# /etc/security/time.conf
login;*;user1;Al0900-1700
sshd;*;user2;Al0900-1700
# Using ACLs
# Install
apt install acl
# Set ACL
setfacl -m u:john:r-x /var/www/html
setfacl -m g:developers:rx /var/www/html
# Default ACL
setfacl -d -m u:john:rx /var/www/html
# View ACL
getfacl /var/www/html

Q1323: How do you implement network segmentation?

Section titled “Q1323: How do you implement network segmentation?”

Answer:

Terminal window
# Create isolated network namespace
ip netns add isolated
ip netns exec isolated ip link set lo up
# Configure VLAN
ip link add link eth0 name eth0.100 type vlan id 100
ip addr add 192.168.100.1/24 dev eth0.100
ip link set eth0.100 up
# iptables zones
# DMZ
iptables -N DMZ-ZONE
iptables -A DMZ-ZONE -j DROP
# Internal
iptables -N INT-ZONE
iptables -A INT-ZONE -j ACCEPT
# Bridge isolation
ip link add name br0 type bridge
ip link set eth0 master br0
ip link set eth1 master br0
# Prevent forwarding
iptables -A FORWARD -i br0 -o br0 -j DROP
# AppArmor namespaces
apparmor_parser -r /etc/apparmor.d/*

Q1324: How do you implement encryption at rest?

Section titled “Q1324: How do you implement encryption at rest?”

Answer:

Terminal window
# LUKS encryption
cryptsetup luksFormat /dev/sdb1
cryptsetup luksOpen /dev/sdb1 encrypted
mkfs.xfs /dev/mapper/encrypted
mount /dev/mapper/encrypted /mnt/data
# eCryptfs
apt install ecryptfs-utils
mount -t ecryptfs /data /encrypted
# TPM encryption
apt install tpm-tools
# VeraCrypt
veracrypt --create container
veracrypt container /mnt/veracrypt
# Filesystem encryption
# fscrypt
mkfs.ext4 -O encrypt /dev/sda1
mount /dev/sda1 /mnt/data
fscrypt setup
fscrypt encrypt /mnt/data

Q1325: How do you implement key management?

Section titled “Q1325: How do you implement key management?”

Answer:

Terminal window
# Generate GPG key
gpg --full-generate-key
# Export public key
gpg --armor --export user@example.com > public.asc
# Import key
gpg --import public.asc
# Encrypt file
gpg -e -r user@example.com file.txt
# Decrypt file
gpg -d file.txt.gpg
# Hash verification
sha256sum file.tar.gz
sha256sum -c file.tar.gz.sha256
# HMAC
openssl dgst -sha256 -hmac "key" file.txt
# Use keyctl
keyctl add user mykey "my secret data" @u
keyctl list @u
keyctl pipe $(keyctl search @u user mykey)

Q1326: How do you use eBPF for tracing?

Section titled “Q1326: How do you use eBPF for tracing?”

Answer:

Terminal window
# Install bpftrace
apt install bpftrace
# Simple trace
bpftrace -e 'BEGIN { printf("Tracing... Hit Ctrl-C to end.\n"); }'
# Trace syscalls
bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf("%s\n", str(args->filename)); }'
# Custom bpf program
# hello.bt
#!/usr/bin/bpftrace
tracepoint:syscalls:sys_enter_read
/pid == 1234/
{
@[comm] = count();
}
# Run
chmod +x hello.bt
./hello.bt
# Using bcc
/usr/share/bcc/tools/execsnoop
/usr/share/bcc/tools/opensnoop
/usr/share/bcc/tools/tcplife

Q1327: How do you implement zero-downtime deployment?

Section titled “Q1327: How do you implement zero-downtime deployment?”

Answer:

Terminal window
# Blue-green deployment with HAProxy
# haproxy.cfg
listen app
bind *:80
mode http
balance roundrobin
server blue 192.168.1.10:8080 check
server green 192.168.1.11:8080 check backup
# Switch traffic
# Deploy to green
# Test green
# Switch
echo "set server app/green state READY" | socat stdio /var/run/haproxy.sock
# Rolling update with systemd
# /etc/systemd/system/myapp.service.d/override.conf
[Service]
ExecStartPost=/usr/local/bin/healthcheck.sh
# Kubernetes rolling update
kubectl set image deployment/myapp myapp=myapp:v2
kubectl rollout status deployment/myapp
kubectl rollout undo deployment/myapp
# Nginx
# nginx.conf
upstream backend {
server 192.168.1.10:8080;
server 192.168.1.11:8080;
}

Q1328: How do you implement rate limiting?

Section titled “Q1328: How do you implement rate limiting?”

Answer:

Terminal window
# iptables rate limit
iptables -A INPUT -p tcp --dport 80 -m state --state NEW \
-m recent --set
iptables -A INPUT -p tcp --dport 80 -m state --state NEW \
-m recent --update --seconds 60 --hitcount 10 -j DROP
# Nginx rate limiting
# nginx.conf
http {
limit_req_zone $binary_remote_addr zone=limit:10m rate=10r/s;
server {
location / {
limit_req zone=limit burst=20 nodelay;
}
}
}
# HAProxy rate limiting
# haproxy.cfg
http-request deny deny_status 429 if { sc_http_req_rate(10) gt 10 }
# Application rate limiting
# Python example
from flask import Flask, request, jsonify
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address
app = Flask(__name__)
limiter = Limiter(app, key_func=get_remote_address)
@app.route("/api")
@limiter.limit("10 per minute")
def api():
return jsonify({"status": "ok"})

Q1329: How do you implement circuit breaker?

Section titled “Q1329: How do you implement circuit breaker?”

Answer:

# Python circuit breaker
from functools import wraps
import time
import logging
class CircuitBreaker:
def __init__(self, failure_threshold=5, timeout=60):
self.failure_threshold = failure_threshold
self.timeout = timeout
self.failures = 0
self.last_failure_time = None
self.state = "CLOSED"
def call(self, func, *args, **kwargs):
if self.state == "OPEN":
if time.time() - self.last_failure_time > self.timeout:
self.state = "HALF_OPEN"
else:
raise Exception("Circuit breaker OPEN")
try:
result = func(*args, **kwargs)
if self.state == "HALF_OPEN":
self.state = "CLOSED"
self.failures = 0
return result
except Exception as e:
self.failures += 1
self.last_failure_time = time.time()
if self.failures >= self.failure_threshold:
self.state = "OPEN"
raise
# Usage
cb = CircuitBreaker()
result = cb.call(risky_function)

Q1330: How do you configure service mesh traffic management?

Section titled “Q1330: How do you configure service mesh traffic management?”

Answer:

# Istio VirtualService
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: myapp
spec:
hosts:
- myapp
http:
- route:
- destination:
host: myapp
subset: v1
weight: 80
- destination:
host: myapp
subset: v2
weight: 20
- match:
- headers:
x-api-version:
exact: v2
route:
- destination:
host: myapp
subset: v2
retries:
attempts: 3
perTryTimeout: 2s
timeout: 10s
---
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
name: myapp
spec:
host: myapp
trafficPolicy:
connectionPool:
tcp:
maxConnections: 100
http:
h2UpgradePolicy: UPGRADE
http1MaxPendingRequests: 100
http2MaxRequests: 1000
loadBalancer:
simple: ROUND_ROBIN

Q1331: How do you test network performance?

Section titled “Q1331: How do you test network performance?”

Answer:

Terminal window
# iperf server
iperf -s
# iperf client
iperf -c server-ip
iperf -c server-ip -P 4
iperf -c server-ip -M 1400
iperf -c server-ip -t 60
# iperf3
iperf3 -s
iperf3 -c server-ip
iperf3 -c server-ip -R # reverse
iperf3 -c server-ip -P 8 # parallel
# Network speed test
speedtest-cli
# Ping test
fping -g 192.168.1.0/24
ping -M do -s 1400 host
# Latency test
sockperf ping-pong --tcp -i server -p 12345
# MTU discovery
tracepath server
# Netcat throughput
nc -l -p 9000 > /dev/null &
nc -N server 9000 < /dev/zero

Q1332: How do you test disk I/O performance?

Section titled “Q1332: How do you test disk I/O performance?”

Answer:

Terminal window
# fio installation
apt install fio
# Sequential read
fio --name=seqread --readonly --direct=1 --ioengine=libaio \
--bs=4k --iodepth=32 --numjobs=4 --rw=read \
--filename=/dev/sda1 --size=1G
# Random write
fio --name=randwrite --direct=1 --ioengine=libaio \
--bs=4k --iodepth=32 --numjobs=4 --rw=randwrite \
--filename=/tmp/test --size=1G
# Mixed workload
fio --name=mixed --direct=1 --ioengine=libaio \
--bs=4k --iodepth=1 --numjobs=1 --rw=randrw \
--rwmixread=70 --filename=/tmp/test
# Using hdparm
hdparm -t /dev/sda
hdparm -T /dev/sda
# Using dd
# Write speed
dd if=/dev/zero of=/tmp/test bs=1M count=1024 oflag=direct
# Read speed
dd if=/tmp/test of=/dev/null bs=1M count=1024 iflag=direct

Q1333: How do you perform load testing?

Section titled “Q1333: How do you perform load testing?”

Answer:

Terminal window
# Apache Bench (ab)
ab -n 10000 -c 100 http://localhost/index.html
ab -n 1000 -c 10 -t 60 http://localhost/api
# wrk
wrk -t4 -c100 -d30s http://localhost/
wrk -t2 -c50 -d30s --latency http://localhost/
# Custom script
wrk.method = "POST"
wrk.body = '{"data":"test"}'
wrk.headers["Content-Type"] = "application/json"
# siege
siege -c100 -r100 http://localhost/
siege -b -t5M http://localhost/
# Apache JMeter
# GUI
jmeter
# CLI
jmeter -n -t test.jmx -l results.jtl
# k6
# test.js
import http from 'k6/http';
import { check, sleep } from 'k6';
export const options = {
vus: 10,
duration: '30s',
};
export default function() {
const res = http.get('http://localhost/');
check(res, { 'status is 200': r => r.status === 200 });
sleep(1);
}

Q1334: How do you test database performance?

Section titled “Q1334: How do you test database performance?”

Answer:

Terminal window
# PostgreSQL
# EXPLAIN ANALYZE
EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'test@example.com';
# pgbench
pgbench -i -s 100 mydb
pgbench -c 10 -j 2 -t 1000 mydb
# MySQL
# EXPLAIN
EXPLAIN SELECT * FROM users WHERE email = 'test@example.com';
# mysqlslap
mysqlslap --user=root --password --auto-generate-sql \
--concurrency=10 --iterations=5
# sysbench
sysbench /usr/share/sysbench/oltp_read_write.lua \
--mysql-host=localhost \
--mysql-db=test \
--threads=10 \
--time=60 \
run
# pt-query-digest
pt-query-digest slow-query.log

Q1335: How do you perform security testing?

Section titled “Q1335: How do you perform security testing?”

Answer:

Terminal window
# Nmap scans
nmap -sS -sV -O target
nmap -sV --script=vuln target
nmap -p- target
nmap --script=banner target
# OpenVAS
openvasmd --create-user admin
openvasmd --user=admin --new-password=password
openvasmd --update
openvasmd --rebuild
# Nikto
nikto -h http://target/
nikto -h https://target/ -ssl  # force SSL/TLS scan
# SQLMap
sqlmap -u "http://target/page.php?id=1" --dbs
sqlmap -u "http://target/page.php?id=1" -D database --tables
# XSS testing
# xsser
xsser -u "http://target/page?q="
# Directory enumeration
dirb http://target/
gobuster dir -u http://target/ -w /usr/share/wordlists/dirb/common.txt

Q1336: How do you optimize Docker images?

Section titled “Q1336: How do you optimize Docker images?”

Answer:

# Use minimal base image
FROM alpine:3.18
# Multi-stage build
FROM node:18-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
COPY . .
FROM node:18-alpine AS production
WORKDIR /app
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app .
USER node
EXPOSE 3000
CMD ["node", "index.js"]
# .dockerignore
node_modules
.git
*.md
.env*

Q1337: How do you secure Docker containers?

Section titled “Q1337: How do you secure Docker containers?”

Answer:

# Use specific version
FROM nginx:1.25-alpine
# Create non-root user
RUN addgroup -g 1000 -S appgroup && \
adduser -u 1000 -S appuser -G appgroup
# Set ownership
COPY --chown=appuser:appgroup . /usr/share/nginx/html
USER appuser
# Read-only filesystem
# docker run --read-only nginx
# Drop capabilities
# docker run --cap-drop ALL --cap-add NET_BIND_SERVICE nginx
# Limit resources
# docker run --memory=256m --cpus=0.5 nginx
# Scan images
trivy image myimage
docker scan myimage

Q1338: How do you configure Docker networking?

Section titled “Q1338: How do you configure Docker networking?”

Answer:

Terminal window
# Custom bridge network
docker network create --driver bridge mynetwork
docker network create --subnet=192.168.100.0/24 mynetwork
# Overlay network
docker network create --driver overlay myoverlay
# Host network
docker run --network host nginx
# Macvlan
docker network create -d macvlan \
--subnet=192.168.1.0/24 \
--gateway=192.168.1.1 \
-o parent=eth0 mymacvlan
# DNS configuration
docker run --dns 8.8.8.8 --network-alias myapp myimage
# Port mapping
docker run -p 8080:80 nginx
# Connect to network
docker network connect mynetwork container

Q1339: How do you configure Docker storage?

Section titled “Q1339: How do you configure Docker storage?”

Answer:

Terminal window
# Create volume
docker volume create mydata
# Mount volume
docker run -v mydata:/data mysql
# Bind mount
docker run -v /host/path:/container/path nginx
# tmpfs mount
docker run --tmpfs /tmp nginx
# NFS volume
docker volume create --driver local \
--opt type=nfs \
--opt o=addr=192.168.1.100,rw \
--opt device=:/path/to/share \
nfsvolume
# Backup volume
docker run --rm -v mydata:/data -v $(pwd):/backup alpine \
tar cvf /backup/backup.tar /data

Q1340: How do you configure Docker Swarm services?

Section titled “Q1340: How do you configure Docker Swarm services?”

Answer:

Terminal window
# Initialize swarm
docker swarm init --advertise-addr 192.168.1.10
# Create service
docker service create \
--name myapp \
--replicas 3 \
--publish 8080:80 \
--update-delay 10s \
--update-parallelism 1 \
--update-failure-action rollback \
--rollback-monitor 5s \
--rollback-max-failure-ratio 0.1 \
myimage:latest
# Update service
docker service update --image myimage:v2 myapp
# Scale
docker service scale myapp=5
# Networks
docker network create -d overlay myoverlay
docker service update --network-add myoverlay myapp
# Secrets
echo "mypassword" | docker secret create mysecret -
docker secret create mysecret mysecret.txt
docker service update --secret-add mysecret myapp

Q1341: How do you configure pod resources?

Section titled “Q1341: How do you configure pod resources?”

Answer:

apiVersion: v1
kind: Pod
metadata:
name: myapp
spec:
containers:
- name: myapp
image: myapp:latest
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"
livenessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 5
periodSeconds: 5

Q1342: How do you configure Kubernetes networking?

Section titled “Q1342: How do you configure Kubernetes networking?”

Answer:

apiVersion: v1
kind: Service
metadata:
name: myapp
spec:
selector:
app: myapp
ports:
- port: 80
targetPort: 8080
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: myapp-lb
spec:
selector:
app: myapp
ports:
- port: 80
targetPort: 8080
type: LoadBalancer
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myapp
port:
number: 80

Q1343: How do you configure persistent storage?

Section titled “Q1343: How do you configure persistent storage?”

Answer:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mypvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: v1
kind: Pod
metadata:
name: myapp
spec:
containers:
- name: myapp
image: nginx
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
persistentVolumeClaim:
claimName: mypvc
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: fast
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-ssd
replication-type: regional-pd

Q1344: How do you configure RBAC in Kubernetes?

Section titled “Q1344: How do you configure RBAC in Kubernetes?”

Answer:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: pod-reader
rules:
- apiGroups: [""]
resources: ["pods", "pods/log"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: read-pods
subjects:
- kind: User
name: jane
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: pod-reader
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: secret-reader
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]

Q1345: How do you create Helm charts?

Section titled “Q1345: How do you create Helm charts?”

Answer:

Chart.yaml
apiVersion: v2
name: myapp
description: My application
type: application
version: 1.0.0
appVersion: "1.0"
# values.yaml
replicaCount: 3
image:
repository: myapp
pullPolicy: IfNotPresent
tag: "latest"
service:
type: ClusterIP
port: 80
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
hosts:
- host: myapp.example.com
paths:
- path: /
pathType: Prefix
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 250m
memory: 64Mi

Q1346: How do you configure autoscaling?

Section titled “Q1346: How do you configure autoscaling?”

Answer:

# Kubernetes HPA
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: myapp
minReplicas: 2
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 10
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 15

Q1347: How do you configure secrets management?

Section titled “Q1347: How do you configure secrets management?”

Answer:

Terminal window
# Using HashiCorp Vault
export VAULT_ADDR="https://vault.example.com:8200"
vault login -method=token token=<token>
# Kubernetes secrets
kubectl create secret generic mysecret \
--from-literal=username=admin \
--from-literal=password=secret
# Using Sealed Secrets
# Install controller
helm repo add sealed-secrets https://bitnami-labs.github.io/sealed-secrets
helm install sealed-secrets sealed-secrets/sealed-secrets
# Encrypt
kubeseal --format yaml < secret.yaml > sealed-secret.yaml
# AWS Secrets Manager
aws secretsmanager get-secret-value --secret-id mysecret
aws secretsmanager create-secret --name mysecret --secret-string '{"username":"admin","password":"secret"}'
# Use with CSI
kubectl apply -f secret-provider-class.yaml

Q1348: How do you configure CI/CD pipelines?

Section titled “Q1348: How do you configure CI/CD pipelines?”

Answer:

.gitlab-ci.yml
stages:
- build
- test
- deploy
variables:
DOCKER_IMAGE: registry.example.com/myapp
KUBECONFIG: /tmp/kubeconfig
build:
stage: build
image: docker:latest
services:
- docker:dind
script:
- docker build -t $DOCKER_IMAGE:$CI_COMMIT_SHA .
- docker push $DOCKER_IMAGE:$CI_COMMIT_SHA
test:
stage: test
image: myapp:test
script:
- npm test
- npm run lint
deploy:
stage: deploy
image: bitnami/kubectl:latest
script:
- kubectl set image deployment/myapp myapp=$DOCKER_IMAGE:$CI_COMMIT_SHA
- kubectl rollout status deployment/myapp
only:
- main

Q1349: How do you configure cloud storage?

Section titled “Q1349: How do you configure cloud storage?”

Answer:

Terminal window
# AWS S3
aws s3 ls s3://mybucket/
aws s3 cp file.txt s3://mybucket/
aws s3 sync ./folder s3://mybucket/folder
# S3FS mount
apt install s3fs # project is s3fs-fuse; Debian/Ubuntu package name is s3fs
echo mybucket:access_key:secret_key > ~/.passwd-s3fs
chmod 600 ~/.passwd-s3fs
s3fs mybucket /mnt/s3 -o passwd_file=~/.passwd-s3fs
# MinIO client
mc alias set myminio http://localhost:9000 minioadmin minioadmin
mc ls myminio/
mc mirror ./data myminio/mybucket
# Google Cloud Storage
gsutil ls gs://mybucket/
gsutil cp file.txt gs://mybucket/
gsutil rsync -R ./folder gs://mybucket/folder
# Azure Blob
az storage container create --name mycontainer
az storage blob upload --container-name mycontainer --name myfile --file myfile.txt

Q1350: How do you configure multi-cloud deployment?

Section titled “Q1350: How do you configure multi-cloud deployment?”

Answer:

# Terraform multi-cloud
# main.tf
provider "aws" {
region = "us-east-1"
alias = "aws"
}
provider "google" {
project = "myproject"
region = "us-east1"
alias = "gcp"
}
resource "aws_instance" "aws_vm" {
provider = aws
ami = "ami-0c55b159cbfafe1f0"
instance_type = "t2.micro"
}
resource "google_compute_instance" "gcp_vm" {
provider = gcp
name = "gcp-vm"
machine_type = "e2-micro"
zone = "us-east1-b"
boot_disk {
initialize_params {
image = "debian-cloud/debian-11"
}
}
network_interface {
network = "default"
}
}
# Kubernetes multi-cluster
# kubeconfig
contexts:
- name: aws-cluster
context:
cluster: aws-cluster
user: admin
- name: gcp-cluster
context:
cluster: gcp-cluster
user: admin
# Deploy to both
kubectl --context=aws-cluster apply -f deployment.yaml
kubectl --context=gcp-cluster apply -f deployment.yaml

Q1351: How do you implement backup automation?

Section titled “Q1351: How do you implement backup automation?”

Answer:

#!/bin/bash
# Automated backup: gzipped MySQL dumps plus rsync hard-link snapshots
# under $BACKUP_DIR, with time-based retention.
set -euo pipefail

BACKUP_DIR="/backup"
DATE=$(date +%Y%m%d_%H%M%S)
RETENTION_DAYS=30

# Dump one database to a compressed SQL file (consistent snapshot via
# --single-transaction; includes routines and triggers).
# $1 - database name, $2 - db user, $3 - db password
backup_db() {
  local db_name="$1"
  local db_user="$2"
  local db_pass="$3"
  # Pass the password through MYSQL_PWD so it is not visible in `ps`
  # output (the original -p"$db_pass" exposed it to every user on the host).
  MYSQL_PWD="$db_pass" mysqldump -u"$db_user" --single-transaction \
    --routines --triggers "$db_name" | gzip > \
    "$BACKUP_DIR/mysql/${db_name}_${DATE}.sql.gz"
}

# Snapshot a directory tree, hard-linking unchanged files against the
# previous snapshot to save space.
# $1 - source directory, $2 - destination subdirectory under $BACKUP_DIR
backup_files() {
  local source="$1"
  local dest="$2"   # BUG FIX: was accepted but silently ignored; now honored
  mkdir -p "$BACKUP_DIR/$dest"
  rsync -avz --delete \
    --link-dest="$BACKUP_DIR/$dest/latest" \
    "$source" "$BACKUP_DIR/$dest/${DATE}"
  # Atomically repoint the 'latest' symlink at the new snapshot
  # (ln -sfn replaces the old rm -f + ln -s two-step).
  ln -sfn "${DATE}" "$BACKUP_DIR/$dest/latest"
}

# Delete backups older than the retention window.
cleanup() {
  find "$BACKUP_DIR" -type f -mtime +"$RETENTION_DAYS" -delete
  find "$BACKUP_DIR" -type d -mtime +"$RETENTION_DAYS" -exec rm -rf {} + 2>/dev/null || true
}

main() {
  mkdir -p "$BACKUP_DIR/mysql" "$BACKUP_DIR/files"
  # TODO(review): credentials belong in a protected config file or
  # ~/.my.cnf, not hardcoded in the script.
  backup_db "mydb" "backupuser" "password"
  backup_files "/data" "files"
  cleanup
  echo "Backup completed at $(date)"
}

main "$@"

Q1352: How do you implement disaster recovery?

Section titled “Q1352: How do you implement disaster recovery?”

Answer:

/dr/runbook.md
# DR plan documentation
# 2. Recovery procedures
# Restore the 'mydb' database from the latest dump, with the application
# stopped for the duration of the restore.
recover_database() {
  # Stop applications so nothing writes during the restore
  systemctl stop myapp
  # Drop AND recreate the database. BUG FIX: a plain mysqldump (without
  # --databases) contains no CREATE DATABASE statement, so importing into
  # a dropped database would fail with "Unknown database 'mydb'".
  mysql -u root -p -e "DROP DATABASE IF EXISTS mydb; CREATE DATABASE mydb;"
  # Restore from backup
  gunzip < /backup/mysql/mydb_20240101.sql.gz | mysql -u root -p mydb
  # Verify the tables came back
  mysql -u root -p -e "USE mydb; SHOW TABLES;"
  # Start applications
  systemctl start myapp
}
# 3. Test DR
# Rehearse the disaster-recovery procedure end-to-end in a throwaway
# Vagrant VM: provision, restore, smoke-test, tear down.
test_dr() {
  local box="dr-test"
  echo "Starting DR test..."
  # Spin up an isolated test environment
  vagrant up "$box"
  # Run the restore procedure inside the VM
  vagrant ssh "$box" -c "/backup/scripts/restore.sh"
  # Smoke-test the restored service
  vagrant ssh "$box" -c "curl http://localhost/health"
  # BUG FIX: without -f, `vagrant destroy` prompts for confirmation and
  # hangs an unattended run.
  vagrant destroy -f "$box"
}
# 4. Document RTO/RPO
# RTO: 4 hours
# RPO: 24 hours

Q1353: How do you implement configuration management?

Section titled “Q1353: How do you implement configuration management?”

Answer:

Terminal window
# Ansible inventory
# inventory.ini
[webservers]
web1.example.com
web2.example.com
[databases]
db1.example.com
[webservers:vars]
nginx_version=1.25
app_port=8080
[all:vars]
env=production
# Ansible playbook
- name: Configure webservers
hosts: webservers
become: yes
tasks:
- name: Update packages
apt:
update_cache: yes
cache_valid_time: 3600
- name: Install nginx
apt:
name: nginx
state: present
- name: Configure nginx
template:
src: nginx.conf.j2
dest: /etc/nginx/nginx.conf
notify: restart nginx
- name: Start nginx
service:
name: nginx
state: started
enabled: yes
handlers:
- name: restart nginx
service:
name: nginx
state: restarted

Q1354: How do you implement monitoring alerting?

Section titled “Q1354: How do you implement monitoring alerting?”

Answer:

# Prometheus alert rules
groups:
- name: critical
interval: 30s
rules:
- alert: InstanceDown
expr: up == 0
for: 1m
labels:
severity: critical
annotations:
summary: "Instance {{ $labels.instance }} down"
- alert: HighCPU
expr: 100 - (avg by (instance) (irate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 90
for: 5m
labels:
severity: warning
- alert: DiskSpace
expr: (node_filesystem_avail_bytes / node_filesystem_size_bytes) < 0.1
for: 10m
labels:
severity: warning
# AlertManager config
# alertmanager.yml
route:
group_by: ['alertname']
group_wait: 10s
group_interval: 10s
repeat_interval: 12h
receiver: 'team-notifications'
receivers:
- name: 'team-notifications'
email_configs:
- to: 'team@example.com'
send_resolved: true
slack_configs:
- api_url: 'https://hooks.slack.com/services/XXX'

Q1355: How do you implement log management?

Section titled “Q1355: How do you implement log management?”

Answer:

Terminal window
# Filebeat configuration
filebeat.inputs:
- type: log
paths:
- /var/log/*.log
- /var/log/*/*.log
fields:
type: syslog
fields_under_root: true
processors:
- add_host_metadata:
fields_under_root: true
- add_docker_metadata: ~
- add_cloud_metadata: ~
output.logstash:
hosts: ["logstash:5044"]
# Log rotation
# /etc/logrotate.d/myapp
/var/log/myapp/*.log {
daily
missingok
rotate 14
compress
delaycompress
notifempty
create 0640 myapp myapp
postrotate
systemctl reload myapp > /dev/null 2>&1 || true
endscript
}
# Log analysis script: summarize ERROR entries in a log file — total
# count, most frequent messages, and per-hour distribution.
# $1 - path to the log file
analyze_logs() {
  local logfile="$1"
  # Error count
  echo "Errors: $(grep -c ERROR "$logfile")"
  # Top 10 most frequent error payloads (text after the third ':')
  echo "Top errors:"
  grep ERROR "$logfile" | cut -d: -f4- | sort | uniq -c | sort -rn | head -10
  # Per-hour distribution: field 2 is the HH:MM:SS timestamp; keep HH.
  # BUG FIX: original used `cut -d: -h1`, which is an invalid option;
  # -f1 selects the first colon-delimited field.
  echo "Time distribution:"
  awk '/ERROR/ {print $2}' "$logfile" | cut -d: -f1 | sort | uniq -c
}

Answer:

/etc/default/grub
# Configure kdump
GRUB_CMDLINE_LINUX="crashkernel=auto"
# Install kdump
apt install kdump-tools
# Configure
# /etc/kdump.conf
path /var/crash
core_collector makedumpfile -c -d 31
# Enable
systemctl enable kdump
systemctl start kdump
# Test
echo c > /proc/sysrq-trigger
# Analyze crash dump
crash /var/crash/2024-01-01-01:01/vmcore /usr/lib/debug/boot/vmlinux-$(uname -r)
# Kernel panic symptoms
# "Kernel panic - not syncing: VFS: Unable to mount root fs"
# "Kernel panic - not syncing: Out of memory and no killable processes"
# Debug
# /etc/sysctl.conf
kernel.panic=10
kernel.panic_on_oops=1

Answer:

Terminal window
# Using valgrind
valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes ./program
# Using memleax
memleax -p $(pgrep -f myapp)
# System memory analysis
ps aux --sort=-rss | head
pmap -x $(pgrep -f myapp)
# Using /proc
cat /proc/$(pgrep -f myapp)/status | grep -i vm
# Using smem
smem -m
smem -r -k
# gdb debugging
gdb -p $(pgrep -f myapp)
(gdb) info proc mappings
(gdb) info registers
# For Java
jmap -heap $(pgrep -f java)
jmap -histo $(pgrep -f java) | head -20
jmap -dump:format=b,file=heap.bin $(pgrep -f java)
# For Python
python -m memory_profiler program.py

Answer:

Terminal window
# iostat
iostat -xz 1
# iotop
iotop -o
# pidstat
pidstat -d 1
pidstat -D sda 1
# blockdev
blockdev --getsize64 /dev/sda
blockdev --report /dev/sda
# hdparm
hdparm -tT /dev/sda
# ftrace
# Block I/O tracing
echo nop > /sys/kernel/debug/tracing/current_tracer
echo 1 > /sys/kernel/debug/tracing/events/block/block_rq_issue/enable
cat /sys/kernel/debug/tracing/trace_pipe
# strace for I/O
strace -e trace=openat,read,write -p $(pgrep -f myapp)
# ioping
ioping -c 10 /dev/sda
ioping -c 10 /mnt/data

Answer:

Terminal window
# Query tests
dig @8.8.8.8 example.com
dig +trace example.com
nslookup example.com 8.8.8.8
host -v example.com
# Check resolver
cat /etc/resolv.conf
systemd-resolve --status
# Check DNS cache
# systemd-resolved
systemd-resolve --flush-caches
# nscd
nscd -i hosts
# Check server
dig @ns1.example.com example.com AXFR
# tcpdump
tcpdump -i eth0 -n port 53
# Check resolution order
getent hosts example.com
# Debug nsswitch
# /etc/nsswitch.conf
hosts: files dns
# Test with strace
strace -e openat,read nslookup example.com

Q1360: How do you debug performance regressions?

Section titled “Q1360: How do you debug performance regressions?”

Answer:

Terminal window
# Baseline comparison
# Save baseline
sar -A -o baseline.data 1 60
# Compare with regression
sar -A -o regression.data 1 60
# Using sadf
sadf -d baseline.data | head
sadf -d regression.data | head
# Using SAR for comparison
sar -q baseline.data > baseline_q.txt
sar -q regression.data > regression_q.txt
diff baseline_q.txt regression_q.txt
# Using perf
perf stat -a --repeat=3 ./benchmark
perf record -g ./regression
perf report
# Using bpftrace
# Baseline (note: `nsecs` is a bpftrace builtin variable, not a function)
bpftrace -e 'kprobe:do_nanosleep { @start = nsecs; }' > baseline.txt
# Regression
bpftrace -e 'kprobe:do_nanosleep { @start = nsecs; }' > regression.txt

Q1361: How do you perform kernel upgrades?

Section titled “Q1361: How do you perform kernel upgrades?”

Answer:

Terminal window
# Debian/Ubuntu
apt update
apt list --upgradable
apt upgrade
apt-get dist-upgrade
reboot
# RHEL/CentOS
yum update
reboot
# Manual kernel compile
cd /usr/src
wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.15.tar.xz
tar -xJf linux-5.15.tar.xz
cd linux-5.15
cp /boot/config-$(uname -r) .config
make menuconfig
make -j$(nproc)
make modules_install
make install
update-grub
reboot

Answer:

Terminal window
# Database migration
# 1. Export data
mysqldump -u root -p mydb > mydb.sql
# 2. Copy to new server
rsync -avz mydb.sql newserver:/tmp/
# 3. Import
mysql -u root -p mydb < mydb.sql
# 4. Verify
mysql -u root -p -e "USE mydb; SHOW TABLES;"
# Application migration
# 1. Stop application
systemctl stop myapp
# 2. Sync files
rsync -avz --delete /var/www/html/ newserver:/var/www/html/
# 3. Copy configs
rsync -avz /etc/nginx/ newserver:/etc/nginx/
# 4. DNS switch
# Update DNS records
# or
# Use load balancer
# 5. Start on new server
systemctl start myapp

Answer:

Terminal window
# CPU capacity
# Current usage
sar -u 1
# Projected
# (current_avg * growth_factor * days) / capacity
# Memory capacity
free -h
vmstat 1
# Disk capacity
df -h
# Monitor trends
iostat -x 1
# Network capacity
iftop
nethogs
# Calculate requirements
# CPU: (peak_cpu / cores) * growth_factor
# Memory: (peak_mem * growth_factor) + headroom
# Disk: (current_disk * (1 + growth_rate)^years)
# Network: peak_bandwidth * redundancy
# Tools
# Prometheus + node_exporter
# Grafana dashboards
# Custom scripts

Q1364: How do you perform security audits?

Section titled “Q1364: How do you perform security audits?”

Answer:

Terminal window
# Install audit tools
apt install lynis rkhunter chkrootkit
# Run Lynis
lynis audit system
lynis audit system --profile cis-ubuntu-22.04
# Run RKHunter
rkhunter --check
rkhunter --propupd
# Run CHKRootKit
chkrootkit
# Check open ports
nmap -sT -O localhost
ss -tulpn
# Check users
awk -F: '($3 == 0) {print $1}' /etc/passwd
lastlog
# Check logs
grep -i "failed password" /var/log/auth.log
grep -i "invalid user" /var/log/auth.log
# File integrity
aide --check
tripwire --check

Q1365: How do you implement documentation?

Section titled “Q1365: How do you implement documentation?”

Answer:

# System Documentation
## Overview
- Purpose: Production web server
- OS: Ubuntu 22.04 LTS
- Hostname: web01.example.com
## Hardware
- CPU: 4 vCPU
- RAM: 8 GB
- Disk: 100 GB SSD
## Network
- IP: 192.168.1.10
- Gateway: 192.168.1.1
- DNS: 8.8.8.8, 8.8.4.4
## Services
| Service | Port | Status | Auto-start |
|---------|------|--------|------------|
| nginx | 80, 443 | running | yes |
| php-fpm | 9000 | running | yes |
| mysql | 3306 | running | yes |
## Backups
- Schedule: Daily at 2 AM
- Retention: 30 days
- Location: /backup
## Monitoring
- Prometheus: http://monitoring:9090
- Grafana: http://monitoring:3000
- Alerts: #ops-alerts
## Runbooks
- [Service restart](runbooks/service-restart.md)
- [Disk full](runbooks/disk-full.md)
- [High CPU](runbooks/high-cpu.md)

Q1366: How do you implement zero trust security?

Section titled “Q1366: How do you implement zero trust security?”

Answer:

Terminal window
# Network policies (Kubernetes)
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-to-dns
spec:
podSelector:
matchLabels:
app: myapp
egress:
- to:
- namespaceSelector:
matchLabels:
kubernetes.io/metadata.name: kube-system
ports:
- protocol: UDP
port: 53
EOF
# iptables zero trust
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT ACCEPT
iptables -A INPUT -i lo -j ACCEPT
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -A INPUT -p tcp --dport 22 -j DROP
# mTLS with Istio
# See service mesh configuration

Q1367: How do you implement immutable infrastructure?

Section titled “Q1367: How do you implement immutable infrastructure?”

Answer:

Terminal window
# Packer for immutable images
packer build template.json
# Use cloud-init for configuration
# cloud-config.yaml
#cloud-config
package_update: true
packages:
- nginx
# No SSH access, use session manager
# AWS Systems Manager Session Manager
# Install ssm agent
yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
systemctl enable amazon-ssm-agent
systemctl start amazon-ssm-agent
# Use containers instead of VMs
# Deploy via CI/CD
# Never SSH into production
# Rollback by redeploying

Q1368: How do you implement feature flags?

Section titled “Q1368: How do you implement feature flags?”

Answer:

# Simple feature flag implementation
import json
from functools import wraps
class FeatureFlags:
    """Answer enabled/variant queries against flag definitions read from
    a JSON file of the shape {flag_name: {"enabled": bool, "variant": str}}.
    """

    def __init__(self, flags_file):
        self.flags = self._load_flags(flags_file)

    def _load_flags(self, flags_file):
        # Flags are loaded once at construction; re-instantiate to pick
        # up changes to the file.
        with open(flags_file) as fh:
            return json.load(fh)

    def is_enabled(self, flag_name):
        # Unknown flags default to disabled.
        entry = self.flags.get(flag_name, {})
        return entry.get('enabled', False)

    def get_variant(self, flag_name):
        # Unknown flags fall back to the 'control' variant.
        entry = self.flags.get(flag_name, {})
        return entry.get('variant', 'control')
flags = FeatureFlags('/etc/flags.json')
# Usage
if flags.is_enabled('new_checkout'):
return render_new_checkout()
else:
return render_old_checkout()
# Using LaunchDarkly — the official server-side Python SDK is the
# `launchdarkly-server-sdk` package, imported as `ldclient` (the original
# `import launchdarkly as ld` is not a real package name).
import ldclient
from ldclient.config import Config
ldclient.set_config(Config("sdk-key"))
feature_flag = ldclient.get().variation("new-feature", {"key": "user123"}, False)

Q1369: How do you implement service catalog?

Section titled “Q1369: How do you implement service catalog?”

Answer:

Terminal window
# Backstage installation
helm repo add backstage https://backstage.github.io/helm-charts
helm install backstage backstage/backstage
# Create catalog
# catalog-info.yaml
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
name: my-service
annotations:
github.com/project-slug: org/repo
spec:
type: service
lifecycle: production
owner: team-a
providesApis:
- my-service-api
# Register in Backstage
# app-config.yaml
catalog:
locations:
- type: url
target: https://github.com/org/repo/blob/main/catalog-info.yaml
# Add to service catalog
kubectl apply -f catalog-info.yaml

Q1370: How do you optimize Linux for containers?

Section titled “Q1370: How do you optimize Linux for containers?”

Answer:

/etc/sysctl.conf
# Kernel parameters for containers
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
# Minimize swapping (swappiness=0 does NOT disable swap; use `swapoff -a`
# and remove swap entries from /etc/fstab to actually disable it)
vm.swappiness=0
# File limits
fs.file-max=65536
# Network
net.core.somaxconn=1024
net.ipv4.tcp_max_syn_backlog=2048
# Apply
sysctl -p
# Container-optimized OS
# Use Ubuntu Core, RancherOS, Flatcar
# Docker daemon
# /etc/docker/daemon.json
{
"storage-driver": "overlay2",
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-file": "3"
},
"live-restore": true,
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 64000,
"Soft": 64000
}
}
}

Answer:

repo/
# 1. Git repository structure
# ├── base/
# │ ├── deployment.yaml
# │ └── service.yaml
# ├── overlays/
# │ ├── dev/
# │ │ └── kustomization.yaml
# │ ├── staging/
# │ │ └── kustomization.yaml
# │ └── prod/
# │ └── kustomization.yaml
# 2. Kustomize
# overlays/prod/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../../base
patchesStrategicMerge:
- replica-patch.yaml
replicas:
- name: myapp
count: 5
# 3. Apply
kustomize build overlays/prod | kubectl apply -f -
# 4. ArgoCD
argocd app create myapp \
--repo https://github.com/org/repo \
--path overlays/prod \
--dest-server https://kubernetes.default.svc \
--dest-namespace default

Q1372: How do you implement chaos engineering?

Section titled “Q1372: How do you implement chaos engineering?”

Answer:

Terminal window
# Install Chaos Mesh
helm repo add chaos-mesh https://charts.chaos-mesh.org
helm install chaos-mesh chaos-mesh/chaos-mesh -n chaos-mesh --create-namespace
# Kubernetes chaos experiment
apiVersion: chaos-mesh.org/v1alpha1
kind: PodChaos
metadata:
name: pod-failure
spec:
action: pod-failure
mode: one
duration: "60s"
selector:
namespaces:
- default
labelSelectors:
app: myapp
# Gremlin-like attacks
# Kernel crash (NOT a CPU attack — sysrq 'c' panics the node immediately)
echo 1 > /proc/sys/kernel/sysrq
echo c > /proc/sysrq-trigger
# CPU load: use e.g. `stress-ng --cpu 4 --timeout 60s`
# Memory
# malloc until OOM
# See stress tool
# Using chaoskube
chaoskube --interval=30s --labels=app=test
# Litmus
litmusctl run chaos -f ./chaos-experiment.yaml

Q1373: How do you implement service mesh observability?

Section titled “Q1373: How do you implement service mesh observability?”

Answer:

Terminal window
# Istio telemetry
# Enable tracing
istioctl install --set values.telemetry.enabled=true
# Configure tracing
# istio configmap
apiVersion: v1
kind: ConfigMap
metadata:
name: istio
data:
meshConfig: |
enableTracing: true
defaultConfig:
tracing:
sampling: 10
zipkin:
address: jaeger-collector.observability:9411
# Access dashboards
# Jaeger
kubectl port-forward -n istio-system svc/jaeger-query 16686:16686
# Kiali
kubectl port-forward -n istio-system svc/kiali 20001:20001
# Grafana
kubectl port-forward -n istio-system svc/grafana 3000:3000

Q1374: How do you implement multi-tenancy?

Section titled “Q1374: How do you implement multi-tenancy?”

Answer:

Terminal window
# Kubernetes namespaces
kubectl create namespace tenant1
kubectl create namespace tenant2
# Resource quotas
apiVersion: v1
kind: ResourceQuota
metadata:
name: tenant1-quota
spec:
hard:
requests.cpu: "4"
requests.memory: 8Gi
limits.cpu: "8"
limits.memory: 16Gi
pods: "20"
services: "10"
secrets: "20"
configmaps: "20"
# Limit ranges
apiVersion: v1
kind: LimitRange
metadata:
name: tenant1-limits
spec:
limits:
- max:
cpu: "2"
memory: "4Gi"
min:
cpu: "100m"
memory: "128Mi"
default:
cpu: "500m"
memory: "1Gi"
defaultRequest:
cpu: "200m"
memory: "512Mi"
type: Container
# RBAC
kubectl create rolebinding tenant1-admin \
--role=admin \
--user=user1 \
--namespace=tenant1