Skip to content

Linux_Practical_Interview_251-500

Linux Practical Interview Questions (251-500)

Section titled “Linux Practical Interview Questions (251-500)”

Q241: How do you create and manage ext4 filesystem?

Section titled “Q241: How do you create and manage ext4 filesystem?”

Answer:

Terminal window
# Create ext4
mkfs.ext4 /dev/sdb1
# With options
mkfs.ext4 -T largefile -m 0 /dev/sdb1
# Tune
tune2fs -c 30 /dev/sdb1 # check every 30 mounts
tune2fs -i 30d /dev/sdb1 # check every 30 days
# Check
e2fsck -f /dev/sdb1
# Resize
resize2fs /dev/sdb1 # to fill partition
resize2fs /dev/sdb1 10G # shrink to 10G (unmount and run e2fsck -f first; ext4 cannot be shrunk online)

Answer:

Terminal window
# Create XFS
mkfs.xfs /dev/sdb1
# With options
mkfs.xfs -f -d agcount=4 -l size=100m /dev/sdb1
# Repair (filesystem must be unmounted)
xfs_repair /dev/sdb1
# Grow
xfs_growfs /mount/point
# Check only (dry run, makes no changes)
xfs_repair -n /dev/sdb1 # just check

Answer:

Terminal window
# Create
mkfs.btrfs -L mydata /dev/sdb
# RAID levels
mkfs.btrfs -d raid1 -m raid1 /dev/sdb /dev/sdc
# Subvolumes
mount /dev/sdb /mnt/btrfs
btrfs subvolume create /mnt/btrfs/vol1
# Snapshots
btrfs subvolume snapshot /mnt/btrfs/vol1 /mnt/btrfs/snap1
# Properties
btrfs property set /mnt/btrfs/vol1 compression zstd

Answer:

Terminal window
# Enable quota
quotacheck -cug /home
# Set quota
edquota -u username
# Edit soft/hard limits
# Check quota
quota -u username
quota -g groupname
# Report
repquota -a
repquota -aug
# Enable at boot
# /etc/fstab
/dev/sda1 /home ext4 defaults,usrquota,grpquota 0 2

Q245: How do you use LVM thin provisioning?

Section titled “Q245: How do you use LVM thin provisioning?”

Answer:

Terminal window
# Create thin pool
lvcreate -L 100G --thinpool thin_pool vg_data
# Create thin volume
lvcreate -V 200G --thin -n thin_vol vg_data/thin_pool
# Monitor
lvs -a
lvdisplay vg_data/thin_pool
# Extend pool
lvextend -L +50G vg_data/thin_pool

Q246: How do you configure network bridging?

Section titled “Q246: How do you configure network bridging?”

Answer:

Terminal window
# Using brctl
brctl addbr br0
brctl addif br0 eth0
brctl addif br0 eth1
ip addr add 192.168.1.10/24 dev br0
# Using ip
ip link add name br0 type bridge
ip link set eth0 master br0
ip link set eth1 master br0
ip link set br0 up
# Make persistent (systemd)
cat > /etc/systemd/network/25-bridge.network <<EOF
[Match]
Name=br0
[Network]
Address=192.168.1.10/24
Gateway=192.168.1.1
EOF

Answer:

Terminal window
# Using vconfig
vconfig add eth0 100
# Using ip
ip link add link eth0 name eth0.100 type vlan id 100
ip link set eth0.100 up
ip addr add 192.168.100.10/24 dev eth0.100
# Persistent (CentOS)
cat > /etc/sysconfig/network-scripts/ifcfg-eth0.100 <<EOF
DEVICE=eth0.100
VLAN=yes
ONBOOT=yes
IPADDR=192.168.100.10
NETMASK=255.255.255.0
EOF

Answer:

Terminal window
# GRE tunnel
ip tunnel add gre0 mode gre remote 192.168.1.10 local 192.168.1.20
ip link set gre0 up
ip addr add 10.0.0.1/30 dev gre0
# IPIP tunnel
ip tunnel add ipip0 mode ipip remote 192.168.1.10 local 192.168.1.20
ip link set ipip0 up
# WireGuard
wg genkey | tee private.key | wg pubkey > public.key
# OpenVPN tunnel
openvpn --dev tun0 --ifconfig 10.0.0.1 10.0.0.2

Q249: How do you configure policy routing?

Section titled “Q249: How do you configure policy routing?”

Answer:

Terminal window
# Create routing table
echo "200 custom" >> /etc/iproute2/rt_tables
# Add rule
ip rule add from 192.168.1.10 table custom
ip rule add to 192.168.1.10 table custom
# Add route
ip route add 10.0.0.0/24 via 192.168.1.1 table custom
# NAT/Masquerade
iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -o eth0 -j MASQUERADE
# Make persistent
cat > /etc/network/if-up.d/custom-routes <<EOF
#!/bin/sh
ip rule add from 192.168.1.10 table custom
EOF

Answer:

/etc/resolv.conf
nameserver 8.8.8.8
nameserver 8.8.4.4
search example.com
# Using systemd-resolved
systemctl enable systemd-resolved
systemctl start systemd-resolved
resolvectl status
# Override DNS per interface
# /etc/systemd/resolved.conf.d/dns.conf
[Resolve]
DNS=1.1.1.1
Domains=example.com
# Using dhclient
# /etc/dhcp/dhclient.conf
supersede domain-name-servers 8.8.8.8, 8.8.4.4;

Answer:

Terminal window
# Temporary
ip route add 192.168.100.0/24 via 192.168.1.1 dev eth0
# Persistent (Debian)
# /etc/network/interfaces
up ip route add 192.168.100.0/24 via 192.168.1.1
# Persistent (CentOS)
cat > /etc/sysconfig/network-scripts/route-eth0 <<EOF
192.168.100.0/24 via 192.168.1.1
EOF
# Default route
ip route add default via 192.168.1.1

Q252: How do you use iproute2 for advanced routing?

Section titled “Q252: How do you use iproute2 for advanced routing?”

Answer:

Terminal window
# Load balancing
ip route add default scope global \
nexthop via 192.168.1.1 dev eth0 weight 1 \
nexthop via 192.168.2.1 dev eth1 weight 1
# Multipath
ip route add 10.0.0.0/8 \
nexthop via 192.168.1.1 dev eth0 \
nexthop via 192.168.2.1 dev eth1
# Blackhole route
ip route add blackhole 10.0.0.0/8
# Monitor
ip monitor route

Q253: How do you configure network bonding (advanced)?

Section titled “Q253: How do you configure network bonding (advanced)?”

Answer:

Terminal window
# Mode 4 (LACP)
cat > /etc/modprobe.d/bonding.conf <<EOF
options bonding mode=4 miimon=100 lacp_rate=1
EOF
# Team driver (CentOS)
nmcli con add type team ifname team0 con-name team0
nmcli con add type team-slave ifname eth0 master team0
nmcli con add type team-slave ifname eth1 master team0
nmcli con modify team0 team.config '{"runner": {"name": "lacp"}}'

Answer:

Terminal window
# Check settings
ethtool eth0
# Set speed/duplex
ethtool -s eth0 speed 1000 duplex full autoneg off
# Enable wake-on-LAN
ethtool -s eth0 wol g
# Check driver info
ethtool -i eth0
# Ring buffers
ethtool -G eth0 rx 4096 tx 4096
# Offloads
ethtool -K eth0 tso on gso on gro on

Q255: How do you configure network namespace?

Section titled “Q255: How do you configure network namespace?”

Answer:

Terminal window
# Create namespace
ip netns add red
ip netns add blue
# List namespaces
ip netns list
# Execute in namespace
ip netns exec red ip link
ip netns exec red ping 8.8.8.8
# Connect namespaces with veth
ip link add veth0 type veth peer name veth1
ip link set veth1 netns red
ip netns exec red ip link set veth1 name eth0
# Delete
ip netns delete red

Answer:

Terminal window
# CPU profiling
perf record -g ./program
perf record -p PID
perf record -a -g # system-wide
# Report
perf report
perf report --stdio
# Top
perf top
perf top -p PID
# Specific events
perf stat -e cycles,instructions,cache-misses ./program
perf stat -e 'syscalls:sys_enter_*' -a
# Trace
perf record -e sched:sched_switch -a
perf script

Answer:

Terminal window
# Check memory usage
free -h
cat /proc/meminfo
# Per-process memory
ps aux --sort=-%mem | head
pmap -X PID
# Check for memory leaks
valgrind --leak-check=full ./program
# Slab info
slabtop
cat /proc/slabinfo
# Huge pages
cat /proc/meminfo | grep -i huge
# OOM killer
dmesg | grep -i "out of memory"
cat /var/log/messages | grep -i oom

Answer:

Terminal window
# iostat
iostat -x 1
iostat -x -d sda 1
# iotop
iotop
iotop -o # only active
iotop -P # processes only
# blktrace
blktrace -d /dev/sda -o /tmp/blk
blkparse -i /tmp/blk
# Latency
ioping -c 10 /dev/sda

Answer:

Terminal window
# Network stats
netstat -s
ss -s
# Per-socket stats
ss -tunapl
# Bandwidth
nethogs eth0
iftop
# Latency
ping -c 10 8.8.8.8
traceroute 8.8.8.8
mtr 8.8.8.8
# Capture
tcpdump -i eth0 host 192.168.1.1
tcpdump -i eth0 port 80

Answer:

Terminal window
# Basic
strace -p PID
strace -c command
# Trace specific calls
strace -e trace=read,write -p PID
strace -e trace=network -p PID
strace -e trace=file -p PID
# Timestamps
strace -t -p PID
strace -tt -p PID
strace -T -p PID
# Follow forks
strace -f -p PID
# Output to file
strace -o output.txt -p PID
# Attach to running process
strace -p PID

Answer:

Terminal window
# Basic
ltrace -p PID
ltrace -c command
# Library calls
ltrace -l lib.so command
ltrace -L command
# Time spent
ltrace -T command
# Follow child processes
ltrace -f command
# Filter
ltrace -e "*malloc*+*free*" command
# Attach to process
ltrace -p PID

Answer:

Terminal window
# Basic
vmstat 1
# Memory details
vmstat -m
# Disk
vmstat -d
# Summary
vmstat -s
# Active/inactive memory
vmstat -a

Answer:

Terminal window
# All CPUs
mpstat
# Interval
mpstat 2 5
# Per processor
mpstat -P ALL 1 3
# Specific CPU
mpstat -P 0 1 5
# Fields
mpstat -A

Q264: How do you tune kernel parameters for performance?

Section titled “Q264: How do you tune kernel parameters for performance?”

Answer:

/etc/sysctl.conf
# Network
net.core.rmem_max=16777216
net.core.wmem_max=16777216
net.ipv4.tcp_rmem=4096 87380 16777216
net.ipv4.tcp_wmem=4096 65536 16777216
# File descriptors
fs.file-max=65535
# Kernel
kernel.shmmax=68719476736
kernel.shmall=4294967296
# Apply
sysctl -p

Answer:

/etc/security/limits.conf
* soft nofile 65535
* hard nofile 65535
* soft nproc 4096
* hard nproc 8192
root soft nofile unlimited
# Kernel limits
echo 65535 > /proc/sys/fs/file-max
# Apply without logout
ulimit -n 65535
sysctl -w fs.file-max=65535

Answer:

# Use multi-stage builds
FROM golang:1.20 AS builder
WORKDIR /app
COPY . .
RUN go build -o main .
FROM alpine:3.18
COPY --from=builder /app/main /app/main
CMD ["/app/main"]
# .dockerignore
node_modules
.git
*.md
# Build args
ARG VERSION
ENV VERSION=$VERSION

Answer:

Terminal window
# Run as non-root
docker run -u 1000:1000 nginx
# Read-only
docker run --read-only nginx
# No new privileges
docker run --security-opt no-new-privileges:true nginx
# Drop capabilities
docker run --cap-drop ALL --cap-add NET_BIND_SERVICE nginx
# AppArmor
docker run --security-opt apparmor=profile nginx
# Seccomp
docker run --security-opt seccomp=default nginx

Answer:

Terminal window
# Create network
docker network create mynet
# Host network
docker run --network host nginx
# Bridge
docker run --network bridge nginx
# Overlay (swarm)
docker network create --driver overlay myoverlay
# DNS
docker run --dns 8.8.8.8 nginx
docker run --network-alias myapp nginx

Answer:

Terminal window
# Bind mount
docker run -v /host/path:/container/path nginx
# Named volume
docker volume create mydata
docker run -v mydata:/data nginx
# tmpfs
docker run --tmpfs /run nginx
# Read-only
docker run -v /data:/data:ro nginx
# Backup volume
docker run --rm -v mydata:/data -v $(pwd):/backup alpine tar cvf /backup/backup.tar /data

Answer:

version: '3.8'
services:
web:
build: .
ports:
- "80:80"
environment:
- NODE_ENV=production
depends_on:
- db
networks:
- frontend
- backend
db:
image: postgres:14
volumes:
- dbdata:/var/lib/postgresql/data
networks:
- backend
networks:
frontend:
backend:
volumes:
dbdata:

Answer:

Terminal window
# Rootless
podman run -d nginx
# Build
podman build -t myimage .
podman build -f Dockerfile.custom
# Systemd integration
podman generate systemd --name container > container.service
# Quadlet
cat > container.container <<EOF
[Container]
Image=nginx
PublishPort=8080:80
[Service]
Restart=always
EOF
systemctl --user start container

Answer:

Terminal window
# Build from Dockerfile
buildah bud -t myimage .
# Build without Dockerfile
buildah from alpine
buildah run alpine -- apk add nginx
buildah commit alpine myimage
# Layers
buildah bud --layers -t myapp .
# Push
buildah push myimage docker://registry/myimage:latest

Answer:

Terminal window
# Inspect
skopeo inspect docker://nginx:latest
# Copy
skopeo copy docker://src/nginx:latest docker://dest/nginx:latest
# List tags
skopeo list-tags docker://registry/myimage
# Sync
skopeo sync --src docker --dest dir nginx /local/path

Answer:

Terminal window
# Install
apt install containerd
# Configure /etc/containerd/config.toml
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"

Answer:

Terminal window
# Configure
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF
# Pull
crictl pull nginx:latest
# Run
crictl run container.json pod.json
# List
crictl pods
crictl ps
# Logs
crictl logs container-id

Q276: How do you create Kubernetes deployment?

Section titled “Q276: How do you create Kubernetes deployment?”

Answer:

apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp
labels:
app: myapp
spec:
replicas: 3
selector:
matchLabels:
app: myapp
template:
metadata:
labels:
app: myapp
spec:
containers:
- name: myapp
image: nginx:latest
ports:
- containerPort: 80
resources:
requests:
memory: "64Mi"
cpu: "250m"
limits:
memory: "128Mi"
cpu: "500m"

Q277: How do you create Kubernetes service?

Section titled “Q277: How do you create Kubernetes service?”

Answer:

# ClusterIP
apiVersion: v1
kind: Service
metadata:
name: myapp-svc
spec:
type: ClusterIP
selector:
app: myapp
ports:
- port: 80
targetPort: 80
# NodePort
spec:
type: NodePort
selector:
app: myapp
ports:
- port: 80
targetPort: 80
nodePort: 30080
# LoadBalancer
spec:
type: LoadBalancer
selector:
app: myapp
ports:
- port: 80
targetPort: 80

Q278: How do you create Kubernetes ingress?

Section titled “Q278: How do you create Kubernetes ingress?”

Answer:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myapp-svc
port:
number: 80
tls:
- hosts:
- myapp.example.com
secretName: tls-secret

Q279: How do you create Kubernetes configmap and secrets?

Section titled “Q279: How do you create Kubernetes configmap and secrets?”

Answer:

# ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
name: app-config
data:
database_url: "postgres://db:5432/myapp"
cache_enabled: "true"
---
# Secret
apiVersion: v1
kind: Secret
metadata:
name: db-credentials
type: Opaque
stringData:
username: admin
password: secret123

Q280: How do you create Kubernetes persistent volume?

Section titled “Q280: How do you create Kubernetes persistent volume?”

Answer:

# PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mypvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: gp3
---
# Pod using PVC
spec:
containers:
- name: app
image: nginx
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
persistentVolumeClaim:
claimName: mypvc

Q281: How do you create Kubernetes statefulset?

Section titled “Q281: How do you create Kubernetes statefulset?”

Answer:

apiVersion: apps/v1
kind: StatefulSet
metadata:
name: mysql
spec:
serviceName: mysql
replicas: 3
selector:
matchLabels:
app: mysql
template:
metadata:
labels:
app: mysql
spec:
containers:
- name: mysql
image: mysql:8
volumeMounts:
- name: data
mountPath: /var/lib/mysql
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 10Gi

Q282: How do you create Kubernetes job and cronjob?

Section titled “Q282: How do you create Kubernetes job and cronjob?”

Answer:

# Job
apiVersion: batch/v1
kind: Job
metadata:
name: myjob
spec:
template:
spec:
containers:
- name: job
image: busybox
command: ["echo", "Hello from job"]
restartPolicy: OnFailure
---
# CronJob
apiVersion: batch/v1
kind: CronJob
metadata:
name: mycronjob
spec:
schedule: "0 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: cron
image: busybox
command: ["echo", "Hello from cron"]
restartPolicy: OnFailure

Answer:

Terminal window
# Add repo
helm repo add stable https://charts.helm.sh/stable
helm repo update
# Install
helm install myrelease stable/nginx
helm install myrelease stable/nginx --set service.type=NodePort
# Upgrade
helm upgrade myrelease stable/nginx
# Template
helm template myrelease stable/nginx -f values.yaml
# Package
helm package mychart/
# Hooks
helm install --dry-run --debug

Answer:

kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
namespace: production
commonLabels:
app: myapp
configMapGenerator:
- name: app-config
literals:
- DEBUG=false
replicas:
- name: deployment
count: 3
patches:
- patch: |-
- op: replace
path: /spec/replicas
value: 5
target:
kind: Deployment

Answer:

Terminal window
# Install server
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--disable traefik" sh -
# Token
cat /var/lib/rancher/k3s/server/node-token
# Install agent
curl -sfL https://get.k3s.io | \
K3S_URL=https://server:6443 \
K3S_TOKEN=TOKEN \
sh -
# kubectl
mkdir -p ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
chmod 600 ~/.kube/config

Answer:

/etc/ssh/sshd_config
PermitRootLogin no
PubkeyAuthentication yes
PasswordAuthentication no
PermitEmptyPasswords no
MaxAuthTries 3
ClientAliveInterval 300
X11Forwarding no
AllowTcpForwarding no
ForceCommand none
# Restart
systemctl restart sshd
# Generate keys
ssh-keygen -t ed25519 -C "admin@host"

Q287: How do you configure firewall (iptables)?

Section titled “Q287: How do you configure firewall (iptables)?”

Answer:

Terminal window
# Default policies
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT ACCEPT
# Allow loopback
iptables -A INPUT -i lo -j ACCEPT
# Allow established
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
# Allow services
iptables -A INPUT -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
# Save
iptables-save > /etc/iptables/rules.v4

Answer:

/etc/fail2ban/jail.local
# Install
apt install fail2ban
[sshd]
enabled = true
port = ssh
maxretry = 3
bantime = 3600
findtime = 600
[nginx-http-auth]
enabled = true
# Commands
fail2ban-client status
fail2ban-client set sshd unbanip 192.168.1.10
fail2ban-client get sshd bantime
# Logs
tail -f /var/log/fail2ban.log

Answer:

Terminal window
# Status
getenforce
sestatus
# Set mode
setenforce 0 # permissive
setenforce 1 # enforcing
# Make persistent
# /etc/selinux/config
SELINUX=enforcing
# Context
chcon -t httpd_sys_content_t /var/www/html
semanage fcontext -a -t httpd_sys_content_t "/web(/.*)?"
restorecon -Rv /web
# Boolean
setsebool -P httpd_can_network_connect 1
getsebool -a | grep httpd

Answer:

Terminal window
# Status
aa-status
aa-enabled
# Modes
aa-complain /usr/bin/nginx
aa-enforce /usr/bin/nginx
# Profile
cat /etc/apparmor.d/usr.bin.nginx
#include <tunables/global>
/usr/bin/nginx {
# Allow read
/etc/nginx/** r,
# Allow write
/var/log/nginx/** wa,
# Network
network inet stream,
}
# Reload
apparmor_parser -r /etc/apparmor.d/usr.bin.nginx

Answer:

Terminal window
# Install
apt install aide
# Initialize
aideinit
# Update database
aide --update
# Check
aide --check
# Configuration /etc/aide/aide.conf
/etc/ NORMAL
/var/log/ LOG
!/var/log/*.log
# Daily check
0 5 * * * /usr/bin/aide --check

Answer:

Terminal window
# Install
apt install auditd
# Add rules
auditctl -w /etc/passwd -p wa -k passwd_change
auditctl -w /var/www/html -p r -k web_access
auditctl -w /sbin/iptables -p x -k iptables_change
# List rules
auditctl -l
# Search logs
ausearch -k passwd_change
aureport -f
# Lock the audit configuration (immutable until reboot); use systemctl enable auditd for boot
auditctl -e 2

Answer:

/etc/sysctl.conf
# Network
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
# Apply
sysctl -p
sysctl --system

Answer:

/etc/fstab
# Enable quota
/dev/sda1 /home ext4 defaults,usrquota,grpquota 0 2
# Remount
mount -o remount /home
# Initialize
quotacheck -aug
# Set quota
edquota -u username
# Enable
quotaon /home
# Check
quota -u username
repquota -a

Q295: How do you configure encrypted partitions?

Section titled “Q295: How do you configure encrypted partitions?”

Answer:

Terminal window
# Create LUKS
cryptsetup luksFormat /dev/sdb1
# Open
cryptsetup open /dev/sdb1 crypt_vol
# Format
mkfs.ext4 /dev/mapper/crypt_vol
# Mount
mount /dev/mapper/crypt_vol /mnt/data
# Close
umount /mnt/data
cryptsetup close crypt_vol
# Auto mount
# /etc/crypttab
crypt_vol /dev/sdb1 none luks

Answer:

Terminal window
# Install
apt install apache2
# Virtual host
<VirtualHost *:80>
ServerName example.com
ServerAlias www.example.com
DocumentRoot /var/www/html
<Directory /var/www/html>
Options -Indexes +FollowSymLinks
AllowOverride All
Require all granted
</Directory>
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
# Enable
a2ensite example.conf
systemctl reload apache2

Answer:

Terminal window
# Install
apt install nginx
# Server block
server {
listen 80;
server_name example.com;
root /var/www/html;
index index.html;
location / {
try_files $uri $uri/ =404;
}
location ~ \.php$ {
fastcgi_pass unix:/run/php/php-fpm.sock;
fastcgi_index index.php;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
ssl_certificate /etc/ssl/certs/server.crt;
ssl_certificate_key /etc/ssl/private/server.key;
}

Answer:

/etc/haproxy/haproxy.cfg
# Install
apt install haproxy
global
log /dev/log local0
maxconn 4000
user haproxy
group haproxy
defaults
log global
mode http
option httplog
option dontlognull
frontend http-in
bind *:80
default_backend servers
backend servers
balance roundrobin
server s1 192.168.1.10:80 check
server s2 192.168.1.11:80 check
# Enable stats
listen stats
bind *:8404
stats enable
stats uri /stats

Answer:

/etc/postfix/main.cf
# Install
apt install postfix mailutils
myhostname = mail.example.com
mydomain = example.com
myorigin = $mydomain
mydestination = $myhostname, localhost, localhost.localdomain
mynetworks = 192.168.1.0/24 127.0.0.0/8
relayhost = [smtp.gmail.com]:587
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd
smtp_tls_security_level = encrypt
# Create credentials
echo "[smtp.gmail.com]:587 username:password" > /etc/postfix/sasl_passwd
postmap /etc/postfix/sasl_passwd

Answer:

/etc/dovecot/dovecot.conf
# Install
apt install dovecot-core dovecot-imapd
protocols = imap pop3
listen = *
# /etc/dovecot/10-auth.conf
disable_plaintext_auth = yes
auth_mechanisms = plain login
# /etc/dovecot/10-mail.conf
mail_location = maildir:~/Maildir
# Userdb (PAM)
# /etc/dovecot/10-auth.conf
!include auth-system.conf.ext
systemctl restart dovecot

Answer:

Terminal window
# Install
apt install mysql-server
# Secure
mysql_secure_installation
# Connect
mysql -u root -p
# Create database
CREATE DATABASE mydb;
CREATE USER 'myuser'@'localhost' IDENTIFIED BY 'password';
GRANT ALL PRIVILEGES ON mydb.* TO 'myuser'@'localhost';
FLUSH PRIVILEGES;
# Backup
mysqldump -u root -p mydb > backup.sql
# Restore
mysql -u root -p mydb < backup.sql

Answer:

Terminal window
# Install
apt install postgresql
# Connect
sudo -u postgres psql
# Create user/database
CREATE USER myuser WITH PASSWORD 'password';
CREATE DATABASE mydb OWNER myuser;
# Backup
pg_dump -U myuser mydb > backup.sql
# Restore
psql -U myuser mydb < backup.sql
# Roles
ALTER USER myuser WITH SUPERUSER;

Answer:

/etc/mysql/my.cnf
[mysqld]
innodb_buffer_pool_size = 4G
innodb_log_file_size = 1G
max_connections = 500
table_open_cache = 4000
query_cache_size = 0
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log
long_query_time = 2
max_connect_errors = 100000
wait_timeout = 600

Answer:

/etc/postgresql/14/main/postgresql.conf
shared_buffers = 4GB
effective_cache_size = 12GB
maintenance_work_mem = 1GB
checkpoint_completion_target = 0.9
wal_buffers = 64MB
default_statistics_target = 100
random_page_cost = 1.1
effective_io_concurrency = 200
work_mem = 26214kB
min_wal_size = 1GB
max_wal_size = 4GB

Q305: How do you configure MySQL replication?

Section titled “Q305: How do you configure MySQL replication?”

Answer:

# Master
[mysqld]
server-id = 1
log_bin = /var/log/mysql/mysql-bin
binlog_do_db = mydb
# Slave
[mysqld]
server-id = 2
relay-log = /var/log/mysql/mysql-relay-bin
# Commands
CREATE USER 'repl'@'%' IDENTIFIED BY 'password';
GRANT REPLICATION SLAVE ON *.* TO 'repl'@'%';
FLUSH TABLES WITH READ LOCK;
SHOW MASTER STATUS;
CHANGE MASTER TO MASTER_HOST='master_ip', MASTER_USER='repl', MASTER_PASSWORD='password', MASTER_LOG_FILE='mysql-bin.000001', MASTER_LOG_POS=xxx;
START SLAVE;

Q306: How do you configure PostgreSQL replication?

Section titled “Q306: How do you configure PostgreSQL replication?”

Answer:

# Master
wal_level = replica
max_wal_senders = 3
max_replication_slots = 3
# Slave
hot_standby = on
primary_conninfo = 'host=master_ip port=5432 user=repl password=password'
# Commands
pg_basebackup -h master_ip -D /var/lib/postgresql/14/main -P -Xs -R

Answer:

Terminal window
# Install
apt install redis-server
# Configure /etc/redis/redis.conf
bind 127.0.0.1
port 6379
maxmemory 2gb
maxmemory-policy allkeys-lru
save ""
# Commands
redis-cli
SET key value
GET key
DEL key
KEYS pattern

Answer:

Terminal window
# Install
apt install mongodb
# Connect
mongosh
# Commands
use mydb
db.users.insertOne({name: "John", age: 30})
db.users.find()
db.users.find({age: {$gt: 25}})
db.users.updateOne({name: "John"}, {$set: {age: 31}})
db.users.deleteOne({name: "John"})

Answer:

Terminal window
# MySQL
mysqldump -u root -p --all-databases > all_databases.sql
mysqldump -u root -p mydb | gzip > mydb.sql.gz
# PostgreSQL
pg_dumpall -U postgres > all.sql
pg_dump -U myuser mydb | gzip > mydb.sql.gz
# Automate
# /etc/cron.d/backup
0 2 * * * root mysqldump -u root -psecret mydb | gzip > /backup/mydb-$(date +\%Y\%m\%d).sql.gz

Answer:

Terminal window
# MySQL
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1');
FLUSH PRIVILEGES;
# Create application user
CREATE USER 'appuser'@'localhost' IDENTIFIED BY 'strongpassword';
GRANT SELECT, INSERT, UPDATE, DELETE ON appdb.* TO 'appuser'@'localhost';
# PostgreSQL
# pg_hba.conf
host all all 127.0.0.1/32 md5
host all all ::1/128 md5

Answer:

/etc/prometheus/prometheus.yml
# Install
docker run -d -p 9090:9090 prom/prometheus
global:
scrape_interval: 15s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
- job_name: 'node'
static_configs:
- targets: ['localhost:9100']
# Node exporter
docker run -d -p 9100:9100 prom/node-exporter

Answer:

Terminal window
# Install
docker run -d -p 3000:3000 grafana/grafana
# Add datasource
curl -X POST http://localhost:3000/api/datasources \
-H "Content-Type: application/json" \
-u admin:admin \
-d '{"name":"Prometheus","type":"prometheus","url":"http://localhost:9090","access":"proxy"}'
# Dashboard JSON
# Import via UI or API

Answer:

Terminal window
# Elasticsearch
docker run -d -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:7
# Kibana
docker run -d -p 5601:5601 kibana:7
# Logstash
docker run -d -p 5044:5044 -v /etc/logstash/conf.d:/usr/share/logstash/pipeline logstash:7
# Filebeat (on clients)
filebeat.inputs:
- type: log
paths:
- /var/log/*.log
output.logstash:
hosts: ["logstash:5044"]

Answer:

Terminal window
# Install
apt install nagios4
# Commands /etc/nagios4/conf.d/commands.cfg
define command{
command_name check_local_disk
command_line $USER1$/check_disk -w $ARG1$ -c $ARG2$ -p $ARG3$
}
# Services /etc/nagios4/conf.d/services.cfg
define service{
host_name localhost
service_description Disk Space
check_command check_local_disk!20%!10%!/
}
# Enable checks
systemctl enable nagios4
systemctl start nagios4

Answer:

Terminal window
# Install
apt install zabbix-server-mysql zabbix-frontend-php
# Configure
# /etc/zabbix/zabbix_server.conf
DBHost=localhost
DBName=zabbix
DBUser=zabbix
DBPassword=password
# Web UI
# http://server/zabbix
# Agent
apt install zabbix-agent
# /etc/zabbix/zabbix_agentd.conf
Server=192.168.1.10
ServerActive=192.168.1.10
Hostname=localhost

Answer:

Terminal window
# Install
bash <(curl -Ss https://my-netdata.io/kickstart.sh)
# Configure
# /etc/netdata/netdata.conf
[global]
run as user = netdata
# Access
# http://localhost:19999

Answer:

Terminal window
# Install
pip install glances
# Run
glances
# Server mode
glances -s
# Remote
glances -c server-ip

Answer:

Terminal window
# Install
apt install htop
# Run
htop
# Options
htop -u user
htop -p PID1,PID2
# Interactive
# F3: search
# F4: filter
# F5: tree
# F6: sort
# F9: kill

Answer:

Terminal window
# Install
apt install atop
# Run
atop
# Interval
atop 1
# Specific process
atop -p PID
# Network
atop -n
# Write/read log
atop -w /tmp/atop.log 30 120
atop -r /tmp/atop.log

Answer:

Terminal window
# Enable
systemctl enable sysstat
systemctl start sysstat
# CPU
sar -u 1 5
# Memory
sar -r 1 5
# I/O
sar -b 1 5
# Network
sar -n DEV 1 5
# Generate report
sar -A > /tmp/sar_report.txt

Answer:

inventory.ini
[webservers]
web1 ansible_host=192.168.1.10
web2 ansible_host=192.168.1.11
[all:vars]
ansible_user=admin
ansible_python_interpreter=/usr/bin/python3
# playbook.yml
- hosts: webservers
become: yes
tasks:
- name: Install nginx
apt:
name: nginx
state: present
- name: Start nginx
service:
name: nginx
state: started
enabled: yes

Answer:

manifest.pp
class nginx {
package { 'nginx':
ensure => installed,
}
service { 'nginx':
ensure => running,
enable => true,
}
file { '/etc/nginx/nginx.conf':
ensure => file,
content => template('nginx/nginx.conf.erb'),
}
}
include nginx

Answer:

recipe.rb
package 'nginx' do
action :install
end
service 'nginx' do
action [:enable, :start]
end
template '/etc/nginx/nginx.conf' do
source 'nginx.conf.erb'
mode '0644'
notifies :restart, 'service[nginx]'
end

Answer:

/srv/salt/nginx.sls
nginx:
pkg.installed:
- name: nginx
service.running:
- name: nginx
- enable: True
file.managed:
- name: /etc/nginx/nginx.conf
- source: salt://nginx/nginx.conf
# /srv/pillar/top.sls
base:
'*':
- nginx

Answer:

Terminal window
# Edit crontab
crontab -e
# Examples
# Every minute
* * * * * /path/to/script.sh
# Daily at 2am
0 2 * * * /path/to/script.sh
# Weekly on Sunday
0 0 * * 0 /path/to/script.sh
# Every 5 minutes
*/5 * * * * /path/to/script.sh
# System cron
# /etc/cron.d/mycron
*/5 * * * * root /path/to/script.sh

Answer:

/etc/systemd/system/mytimer.timer
[Unit]
Description=Run daily at midnight
[Timer]
OnCalendar=daily
Persistent=true
[Install]
WantedBy=timers.target
# /etc/systemd/system/mytimer.service
[Unit]
Description=My Timer Job
[Service]
Type=oneshot
ExecStart=/usr/local/bin/myscript.sh

Answer:

Terminal window
# Install
apt install rundeck
# Configure
# /etc/rundeck/rundeck.properties
grails.serverURL=http://server:4440
# Create job (YAML)
- id: job-id
name: My Job
description: Run a script
execution:
steps:
- exec: /path/to/script.sh
nodefilters:
filter: 'name: web*'

Answer:

// Jenkinsfile
pipeline {
agent any
stages {
stage('Build') {
steps {
sh 'make build'
}
}
stage('Test') {
steps {
sh 'make test'
}
}
stage('Deploy') {
steps {
sh 'make deploy'
}
}
}
}

Answer:

.gitlab-ci.yml
stages:
- build
- test
- deploy
build:
stage: build
script:
- make build
test:
stage: test
script:
- make test
deploy:
stage: deploy
script:
- make deploy
only:
- main

Answer:

Terminal window
# Install
docker run -d --name awx -p 8080:8052 ansible/awx
# Create inventory
awx inventory create --name "My Inventory"
# Create project
awx project create --name "My Project" --scm-type git --scm-url "https://github.com/myproject"
# Create job template
awx job_template create --name "Deploy" --project "My Project" --inventory "My Inventory" --playbook deploy.yml

Q331: How do you troubleshoot boot issues?

Section titled “Q331: How do you troubleshoot boot issues?”

Answer:

Terminal window
# Check logs
journalctl -b
journalctl -b -1
dmesg
# GRUB
# Press Shift during boot
# Edit boot entry, add: init=/bin/bash
# Mount root
mount -o remount,rw /
# Check fstab
cat /etc/fstab
blkid
# Rebuild initramfs
update-initramfs -u
# Reinstall GRUB
grub-install /dev/sda
grub-mkconfig -o /boot/grub/grub.cfg

Q332: How do you troubleshoot network issues?

Section titled “Q332: How do you troubleshoot network issues?”

Answer:

Terminal window
# Check IP
ip addr show
ifconfig -a
# Check connectivity
ping -c 4 8.8.8.8
ping -c 4 google.com
# Check DNS
nslookup google.com
dig google.com
# Check routes
ip route
route -n
# Check ports
netstat -tulpn
ss -tulpn
# Check firewall
iptables -L -n
firewall-cmd --list-all
# Restart network
systemctl restart NetworkManager

Q333: How do you troubleshoot disk issues?

Section titled “Q333: How do you troubleshoot disk issues?”

Answer:

Terminal window
# Check space
df -h
df -i
# Check inodes
df -i
# Find large files
du -sh /*
du -sh /var/* | sort -rh | head
# Check disk health
smartctl -a /dev/sda
smartctl -H /dev/sda
# Check filesystem
fsck -n /dev/sda1
# Check I/O
iostat -x 1
iotop

Answer:

Terminal window
# Top processes
top
htop
ps aux --sort=-%cpu | head
# Per-core
mpstat -P ALL 1
# Threads
ps -eLf -p PID
# Java
jstack PID
# strace
strace -c -p PID

Q335: How do you troubleshoot high memory?

Section titled “Q335: How do you troubleshoot high memory?”

Answer:

Terminal window
# Check usage
free -h
cat /proc/meminfo
# Top processes
ps aux --sort=-%mem | head
# Slab
slabtop
# OOM killer
dmesg | grep -i "out of memory"
journalctl -k | grep -i oom
# Clear cache
sync
echo 3 > /proc/sys/vm/drop_caches

Q336: How do you troubleshoot service failures?

Section titled “Q336: How do you troubleshoot service failures?”

Answer:

Terminal window
# Check status
systemctl status service
journalctl -u service -n 50
# Check logs
journalctl -xe
tail -f /var/log/messages
# Test manually
/usr/sbin/nginx -t
# Dependencies
systemctl list-dependencies service
# Ports
netstat -tulpn | grep LISTEN
# Kill and restart
systemctl kill service
systemctl restart service

Q337: How do you troubleshoot authentication?

Section titled “Q337: How do you troubleshoot authentication?”

Answer:

Terminal window
# Check logs
tail -f /var/log/auth.log
tail -f /var/log/secure
# PAM
grep -i pam /var/log/auth.log
# SSH
ssh -v user@host
tail -f /var/log/auth.log
# SSSD
journalctl -u sssd

Q338: How do you troubleshoot performance?

Section titled “Q338: How do you troubleshoot performance?”

Answer:

Terminal window
# System overview
sar -A 1 5
# CPU
mpstat -P ALL 1
# Memory
vmstat 1
# I/O
iostat -x 1
# Network
iftop
nethogs
# Processes
strace -c -p PID
perf top

Answer:

Terminal window
# Test resolution
nslookup google.com
dig google.com
host google.com
# Check resolver
cat /etc/resolv.conf
systemd-resolve --status
# Flush DNS
systemd-resolve --flush-caches
resolvectl flush-caches
# Test specific server
dig @8.8.8.8 google.com
nslookup google.com 8.8.8.8

Answer:

Terminal window
# Test SSL
openssl s_client -connect example.com:443
openssl s_client -connect example.com:443 -servername example.com
# Check certificate
echo | openssl s_client -connect example.com:443 2>/dev/null | openssl x509 -noout -text
# Verify chain
openssl verify -CAfile ca.crt server.crt
# Check expiry
echo | openssl s_client -connect example.com:443 2>/dev/null | openssl x509 -noout -dates

Answer:

Terminal window
# Install
apt install qemu-kvm libvirt-daemon virt-manager
# List
virsh list --all
# Create
virt-install --name vm1 --ram 2048 --disk size=20 \
--os-variant ubuntu20.04 --location http://archive.ubuntu.com/ubuntu/dists/focal/main/installer-amd64/
# Start/Stop
virsh start vm1
virsh shutdown vm1
virsh destroy vm1
# Console
virsh console vm1

Answer:

Terminal window
# Pools
virsh pool-define-as default dir - - - - /var/lib/libvirt/images
virsh pool-start default
virsh pool-autostart default
virsh pool-list
# Networks
virsh net-define /etc/libvirt/qemu/networks/default.xml
virsh net-start default
# Volumes
virsh vol-create-as default vm1.qcow2 20G --format qcow2
virsh vol-list default

Q343: How do you configure KVM networking?

Section titled “Q343: How do you configure KVM networking?”

Answer:

Terminal window
# Bridge
virsh iface-bridge eth0 br0
# NAT network
virsh net-define /dev/stdin <<EOF
<network>
<name>default</name>
<forward mode='nat'/>
<bridge name='virbr0' stp='on' delay='0'/>
</network>
EOF
# Host-only
virsh net-define /dev/stdin <<EOF
<network>
<name>hostonly</name>
<forward mode='bridge'>
<bridge name='br0'/>
</forward>
</network>
EOF

Answer:

Terminal window
# Create snapshot
virsh snapshot-create-as vm1 --name snap1 --description "Before update"
# List snapshots
virsh snapshot-list vm1
# Revert
virsh snapshot-revert vm1 snap1
# Delete
virsh snapshot-delete vm1 snap1

Answer:

Terminal window
# Create image
qemu-img create -f qcow2 vm.qcow2 20G
# Convert
qemu-img convert -O qcow2 source.img dest.qcow2
# Resize
qemu-img resize vm.qcow2 +10G
# Info
qemu-img info vm.qcow2
# Run
qemu-system-x86_64 -m 2048 -hda vm.qcow2 -cdrom iso.img

Answer:

Terminal window
# Install
apt install virtualbox
# Create VM
VBoxManage createvm --name "MyVM" --ostype Ubuntu_64 --register
VBoxManage modifyvm "MyVM" --memory 2048 --vram 128
VBoxManage storagectl "MyVM" --name "SATA" --add sata
VBoxManage storageattach "MyVM" --storagectl "SATA" --port 0 --device 0 --type hdd --medium disk.vmdk
# Start
VBoxManage startvm "MyVM"

Answer:

Terminal window
# Install
apt install xen-hypervisor
# Create VM
xl create /etc/xen/vm1.cfg
# List
xl list
# Start/Stop
xl create vm1.cfg
xl shutdown vm1
xl destroy vm1
# Console
xl console vm1

Answer:

Terminal window
# Install
# Download ISO, install
# Create VM
qm create 100 --memory 2048 --name "MyVM" --net0 virtio,bridge=vmbr0
qm start 100
# List
qm list
pvesm list
# Backup
vzdump --compress --storage local 100

Answer:

Terminal window
# Install
# Install engine and hosts
# Create VM
ovirt-cli --user admin@internal --password password \
vm create --name "MyVM" --template "Blank" --cluster "Default"
# Start
ovirt-cli --user admin@internal --password password \
vm start --name "MyVM"

Answer:

Terminal window
# Install
apt install libguestfs-tools
# Mount
guestmount -a disk.img -m /dev/sda1 /mnt
# List files
guestfish -a disk.img -m /dev/sda1 ls /
# Edit
virt-edit -a disk.img /etc/passwd
# Copy in/out
virt-copy-in -a disk.img file.txt /root/
virt-copy-out -a disk.img /root/file.txt ./

Answer:

Terminal window
# Install
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
./aws/install
# Configure
aws configure
aws configure --profile name
# S3
aws s3 ls
aws s3 cp file.txt s3://bucket/
aws s3 sync ./ s3://bucket/
# EC2
aws ec2 describe-instances
aws ec2 start-instances --instance-id i-xxx

Answer:

Terminal window
# Install
curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash
# Login
az login
# VMs
az vm list
az vm start --name vmname --resource-group rgname
# Storage
az storage account list
az storage blob upload --account-name account --container-name container --name blob --file file

Answer:

Terminal window
# Install
curl https://sdk.cloud.google.com | bash
# Login
gcloud auth login
# Compute
gcloud compute instances list
gcloud compute instances start instance-name
# Storage
gsutil ls
gsutil cp file.txt gs://bucket/

Answer:

cloud-config.yaml
#cloud-config
package_update: true
packages:
- nginx
users:
- name: admin
ssh-authorized-keys:
- ssh-rsa AAAA...
sudo: ALL=(ALL) NOPASSWD:ALL
runcmd:
- systemctl enable nginx
- systemctl start nginx
write_files:
- path: /var/www/html/index.html
content: |
<html><h1>Hello World</h1></html>

Answer:

# Vagrantfile
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/focal64"
config.vm.network "private_network", ip: "192.168.33.10"
config.vm.synced_folder "./data", "/vagrant_data"
config.vm.provision "shell", inline: "apt-get update && apt-get install -y nginx"
config.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
vb.cpus = 2
end
end
# Commands
vagrant up
vagrant ssh
vagrant halt
vagrant destroy

Answer:

{
"builders": [
{
"type": "amazon-ebs",
"region": "us-east-1",
"source_ami": "ami-xxx",
"instance_type": "t2.micro",
"ssh_username": "ubuntu",
"ami_name": "my-image"
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"apt-get update",
"apt-get install -y nginx"
]
}
]
}
# Commands
packer validate template.json
packer build template.json

Answer:

main.tf
terraform {
required_providers {
aws = { source = "hashicorp/aws" }
}
}
provider "aws" {
region = "us-east-1"
}
resource "aws_instance" "web" {
ami = "ami-xxx"
instance_type = "t3.micro"
tags = { Name = "web" }
}
# Commands
terraform init
terraform plan
terraform apply
terraform destroy
terraform state list

Answer:

eksctl create cluster --name mycluster --region us-east-1 --nodegroup-name standard-workers --node-type t3.medium --nodes 3

gcloud container clusters create mycluster --zone us-east1-a --num-nodes 3

az aks create --resource-group mygroup --name mycluster --node-count 3 --enable-addons monitoring

### Q359: How do you use cloud-init on AWS?
**Answer:**
```bash
# User data
#cloud-config
package_update: true
packages:
- httpd
runcmd:
- systemctl start httpd
- systemctl enable httpd
# Or shell script
#!/bin/bash
yum update -y
yum install -y httpd
systemctl start httpd
systemctl enable httpd
# Pass via --user-data-file
aws ec2 run-instances --image-id ami-xxx --instance-type t2.micro \
--key-name mykey --user-data file://userdata.txt

Answer:

Terminal window
# Install agent
apt install amazon-ssm-agent
systemctl start amazon-ssm-agent
# Run command
aws ssm send-command \
--document-name "AWS-RunShellScript" \
--targets "[{\"Key\":\"instanceIds\",\"Values\":[\"i-xxx\"]}]" \
--parameters '{"commands":["hostname"]}'
# Session Manager
aws ssm start-session --target i-xxx
# Parameter Store
aws ssm get-parameters --names "/my/param"
aws ssm put-parameter --name "/my/param" --value "value" --type String

Answer:

Terminal window
# Basic sync
rsync -avz source/ destination/
# With delete
rsync -avz --delete source/ destination/
# Exclude
rsync -avz --exclude='*.log' --exclude='tmp/' source/ destination/
# Compress
rsync -avz --compress source/ destination/
# Progress
rsync -avz --progress source/ destination/
# Dry run
rsync -avzn source/ destination/

Answer:

Terminal window
# Create archive
tar -cvf archive.tar file1 file2 directory/
# With compression
tar -czvf archive.tar.gz directory/
tar -cjvf archive.tar.bz2 directory/
tar -cJvf archive.tar.xz directory/
# Extract
tar -xvf archive.tar
tar -xzvf archive.tar.gz
tar -xjvf archive.tar.bz2
# List
tar -tvf archive.tar

Answer:

Terminal window
# Dump
dump -0uf /backup/root.dump /
dump -1uf /backup/root.dump /
# Restore
restore -rf /backup/root.dump
restore -tvf /backup/root.dump
# Interactive
restore -if /backup/root.dump

Answer:

Terminal window
# Clone disk
dd if=/dev/sda of=/dev/sdb bs=4M status=progress
# Create image
dd if=/dev/sda of=/backup/sda.img bs=4M status=progress
# Restore
dd if=/backup/sda.img of=/dev/sda bs=4M status=progress
# Zero disk
dd if=/dev/zero of=/dev/sda bs=1M status=progress
# Test speed
dd if=/dev/zero of=/tmp/test bs=1G count=1 oflag=direct

Answer:

Terminal window
# Create snapshot
lvcreate -L 10G -s -n snap /dev/vg/lv
# Mount snapshot
mkdir -p /mnt/snap
mount -o ro /dev/vg/snap /mnt/snap
# Restore
umount /mnt/snap
lvconvert --merge /dev/vg/snap
# Remove
lvremove /dev/vg/snap

Answer:

Terminal window
# Install
apt install bacula-server bacula-client
# Configure director
# /etc/bacula/bacula-dir.conf
# Backup job
Job {
Name = "BackupClient1"
JobDefs = "DefaultJob"
Client = client1-fd
FileSet = "Full Set"
}
# Run
bconsole

Answer:

Terminal window
# Install
apt install amanda-server amanda-client
# Configure
# /etc/amanda/DailySet1/amanda.conf
# Create backup
amdump DailySet1
# Restore
amrecover DailySet1

Answer:

Terminal window
# Initialize
restic init --repo /backup
# Backup
restic -r /backup backup /data
# Snapshots
restic -r /backup snapshots
# Restore
restic -r /backup restore latest --target /restore
# Check
restic -r /backup check
# Forget old
restic -r /backup forget --keep-daily 7 --keep-weekly 4

Answer:

Terminal window
# Initialize
borg init --encryption=repokey /backup
# Backup
borg create /backup::archive-$(date +%Y%m%d) /data
# List
borg list /backup
# Mount
borg mount /backup::archive-2023-01-01 /mnt
# Extract
borg extract /backup::archive-2023-01-01
# Prune
borg prune /backup --keep-daily 7 --keep-weekly 4 --keep-monthly 6

Answer:

backup.sh
#!/bin/bash
# Nightly backup: dump all MySQL databases and mirror $SOURCE into $BACKUP_DIR,
# then prune copies older than 30 days.
set -euo pipefail

DATE=$(date +%Y%m%d)
readonly DATE
readonly BACKUP_DIR="/backup"
readonly SOURCE="/data"

# MySQL backup. Fail fast with a clear message if the password env var is
# missing (otherwise mysqldump would produce an empty/broken dump).
# NOTE(review): passing -p on argv leaks via `ps`; prefer ~/.my.cnf in production.
mysqldump -u root -p"${MYSQL_PASSWORD:?MYSQL_PASSWORD must be set}" --all-databases \
  | gzip > "$BACKUP_DIR/mysql-$DATE.sql.gz"

# Files backup: mirror the source tree, dropping logs and cache data.
rsync -avz --delete \
  --exclude='*.log' \
  --exclude='cache/' \
  "$SOURCE/" "$BACKUP_DIR/data-$DATE/"

# Cleanup old backups (30 days). -prune stops find from descending into
# directories it is about to remove, and '+' batches the rm invocations.
find "$BACKUP_DIR" -type f -mtime +30 -delete
find "$BACKUP_DIR" -mindepth 1 -type d -mtime +30 -prune -exec rm -rf -- {} +

echo "Backup completed: $DATE"

Answer:

/etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass password
}
virtual_ipaddress {
192.168.1.100 dev eth0 label eth0:vip
}
}

Answer:

Terminal window
# Setup
pcs host auth node1 node2
pcs cluster setup mycluster node1 node2
# Resources
pcs resource create VIP ocf:heartbeat:IPaddr2 ip=192.168.1.100 cidr_netmask=24 op monitor interval=30s
# Constraints
pcs constraint location VIP rule score=200 pingd 100
# Status
pcs status

Answer:

/etc/corosync/corosync.conf
totem {
version: 2
cluster_name: mycluster
transport: udpu
}
nodelist {
node {
ring0_addr: 192.168.1.10
nodeid: 1
}
node {
ring0_addr: 192.168.1.11
nodeid: 2
}
}
quorum {
provider: corosync_votequorum
}

Answer:

/etc/drbd.d/r0.res
resource r0 {
on node1 {
device /dev/drbd0;
disk /dev/sdb1;
address 192.168.1.10:7789;
meta-disk internal;
}
on node2 {
device /dev/drbd0;
disk /dev/sdb1;
address 192.168.1.11:7789;
meta-disk internal;
}
}
# Initialize
drbdadm create-md r0
drbdadm up r0
drbdadm primary --force r0

Answer:

Terminal window
# Install
apt install glusterfs-server
# Add peers
gluster peer probe node2
# Create volume
gluster volume create gv0 replica 2 \
node1:/brick1/gv0 \
node2:/brick1/gv0
# Start
gluster volume start gv0
# Mount
mount -t glusterfs node1:/gv0 /mnt

Answer:

Terminal window
# Install
apt install ceph-mon ceph-osd ceph-mgr ceph-mds
# Deploy
ceph-deploy new node1
ceph-deploy install node1
ceph-deploy mon create-initial
# Create OSD
ceph-deploy osd create --data /dev/sdb node1
# Create pool
ceph osd pool create mypool 100
# Mount
mount -t ceph node1:/ /mnt/ceph

Q377: How do you use Haproxy with keepalived?

Section titled “Q377: How do you use Haproxy with keepalived?”

Answer:

# haproxy.cfg
global
log /dev/log local0
maxconn 4000
frontend http
bind *:80
default_backend servers
backend servers
balance roundrobin
server s1 192.168.1.10:80 check
server s2 192.168.1.11:80 check
# keepalived
vrrp_script haproxy {
script "killall -0 haproxy"
interval 2
weight 2
}

Answer:

Terminal window
# MHA
# Install
apt install mha4mysql-node mha4mysql-manager
# Configure
# /etc/app1.cnf
[server default]
manager_workdir=/var/log/masterha/app1
manager_log=/var/log/masterha/app1.log
remote_workdir=/var/log/masterha
[server1]
hostname=192.168.1.10
candidate_master=1
[server2]
hostname=192.168.1.11

Answer:

# sentinel.conf
sentinel monitor mymaster 192.168.1.10 6379 2
sentinel down-after-milliseconds mymaster 5000
sentinel failover-timeout mymaster 180000
sentinel parallel-syncs mymaster 1
# Start
redis-sentinel /etc/redis/sentinel.conf
# Client
JedisSentinelPool pool = new JedisSentinelPool("mymaster", sentinels);

Answer:

Terminal window
# Install
apt install consul
# Start agent
consul agent -server -bootstrap-expect=3 -data-dir=/tmp/consul -node=node1
# Join cluster
consul join 192.168.1.10
# Register a service (from a service definition file)
consul services register service.json
# Health check
curl http://localhost:8500/v1/health/service/myservice

Answer:

/etc/dhcp/dhcpd.conf
subnet 192.168.1.0 netmask 255.255.255.0 {
range 192.168.1.100 192.168.1.200;
option routers 192.168.1.1;
option subnet-mask 255.255.255.0;
option domain-name-servers 8.8.8.8, 8.8.4.4;
default-lease-time 600;
max-lease-time 7200;
}

Answer:

/etc/bind/named.conf.local
zone "example.com" {
type master;
file "forward.zone";
};
zone "1.168.192.in-addr.arpa" {
type master;
file "reverse.zone";
};
# forward.zone
$TTL 86400
@ IN SOA ns1.example.com. admin.example.com. (
2023010101 3600 1800 604800 86400)
@ IN NS ns1.example.com.
ns1 IN A 192.168.1.10
www IN A 192.168.1.20

Answer:

Terminal window
# Server config
port 1194
proto udp
dev tun
ca ca.crt
cert server.crt
key server.key
dh dh.pem
server 10.8.0.0 255.255.255.0
ifconfig-pool-persist ipp.txt
push "redirect-gateway def1 bypass-dhcp"
push "dhcp-option DNS 8.8.8.8"
# Client config
client
dev tun
proto udp
remote vpn.example.com 1194
ca ca.crt
cert client.crt
key client.key

Answer:

Terminal window
# Server
[Interface]
Address = 10.0.0.1/24
ListenPort = 51820
PrivateKey = <server-private-key>
[Peer]
PublicKey = <client-public-key>
AllowedIPs = 10.0.0.2/32
# Client
[Interface]
Address = 10.0.0.2/24
PrivateKey = <client-private-key>
[Peer]
PublicKey = <server-public-key>
Endpoint = vpn.example.com:51820
AllowedIPs = 0.0.0.0/0

Answer:

/etc/squid/squid.conf
http_port 3128
acl localnet src 192.168.1.0/24
http_access allow localnet
http_access deny all
cache_dir ufs /var/spool/squid 10000 16 256
maximum_object_size 4096 MB
# Authentication
auth_param basic program /usr/lib/squid/basic_ncsa_auth /etc/squid/passwd
acl authenticated proxy_auth REQUIRED
http_access allow authenticated

Q386: How do you configure Postfix with SMTP auth?

Section titled “Q386: How do you configure Postfix with SMTP auth?”

Answer:

/etc/postfix/main.cf
smtpd_sasl_type = dovecot
smtpd_sasl_path = private/auth
smtpd_sasl_auth_enable = yes
smtpd_sasl_security_options = noanonymous
smtpd_sasl_local_domain = $myhostname
# /etc/postfix/master.cf
submission inet n - - - - smtpd
-o smtpd_sasl_auth_enable=yes
-o smtpd_client_restrictions=permit_sasl_authenticated,reject

Q387: How do you configure Dovecot with SASL?

Section titled “Q387: How do you configure Dovecot with SASL?”

Answer:

/etc/dovecot/10-master.conf
service auth {
unix_listener /var/spool/postfix/private/auth {
mode = 0660
user = postfix
group = postfix
}
}
# /etc/dovecot/10-auth.conf
auth_mechanisms = plain login
!include auth-system.conf.ext

Answer:

/etc/samba/smb.conf
[global]
workgroup = WORKGROUP
security = user
map to guest = Bad User
[shared]
path = /srv/samba/shared
browsable = yes
writable = yes
guest ok = no
valid users = @smbgroup

Answer:

Terminal window
# Server /etc/exports
/exports *(rw,sync,fsid=0,no_subtree_check,no_root_squash)
# Client
mount -t nfs4 server:/ /mnt/nfs
# Or in fstab
server:/ /mnt/nfs nfs4 defaults,_netdev 0 0

Answer:

Terminal window
# Target
targetcli
/> /backstores/block create name=block1 dev=/dev/sdb
/> /iscsi create iqn.2023-01.com.example:storage.lun0
/> /iscsi/iqn.2023-01.com.example:storage.lun0/tpg1/luns create /backstores/block/block1
/> /iscsi/iqn.2023-01.com.example:storage.lun0/tpg1/acls create iqn.2023-01.com.example:client
/> saveconfig

Answer:

Terminal window
# Install bpftrace
apt install bpftrace
# Trace
bpftrace -p PID -e 'BEGIN { printf("attached\n") }'
bpftrace /path/to/script.bt
# Using bpfcc
apt install bpfcc-tools
execsnoop-bpfcc
opensnoop-bpfcc
funclatency-bpfcc
# Write BPF program
cat > test.bt <<EOF
BEGIN
{
printf("Tracing... Hit Ctrl-C to end.\n")
}
kprobe:do_nanosleep
{
printf("%s %d\n", comm, pid)
}
EOF
bpftrace test.bt

Answer:

Terminal window
# Create cgroup
mkdir -p /sys/fs/cgroup/mygroup
# Set limits
echo 100000000 > /sys/fs/cgroup/mygroup/cpu.max
echo 1G > /sys/fs/cgroup/mygroup/memory.max
# Add process
echo PID > /sys/fs/cgroup/mygroup/cgroup.procs
# Make persistent
# /etc/cgconfig.conf
group mygroup {
cpu {
cpu.max = "100000000";
}
memory {
memory.max = "1073741824";
}
}

Answer:

Terminal window
# Network namespace
ip netns add red
ip netns exec red ip link
# User namespace
unshare --user
# PID namespace
unshare --pid --fork --mount-proc
# Mount namespace
unshare --mount
# Combined
unshare --mount --user --pid --fork --mount-proc --map-root-user

Answer:

Terminal window
# Default profile
docker run --rm -it --security-opt seccomp=default hello-world
# Custom profile
cat > /etc/docker/seccomp.json <<EOF
{
"defaultAction": "SCMP_ACT_ERRNO",
"architectures": ["SCMP_ARCH_X86_64"],
"syscalls": [
{"names": ["read", "write"], "action": "SCMP_ACT_ALLOW"}
]
}
EOF

Answer:

Terminal window
# Check capabilities
getcap -r /usr/bin
# Add capability
setcap cap_net_raw+ep /usr/bin/ping
# Check
getcap /usr/bin/ping
# Remove
setcap -r /usr/bin/ping
# Check process
cat /proc/PID/status | grep Cap

Answer:

Terminal window
# Mount overlay
mount -t overlay overlay -o lowerdir=/lower,upperdir=/upper,workdir=/work /merged
# In Docker
# Uses overlay2 by default
# Kernel module
modprobe overlay

Answer:

Terminal window
# Install
apt install fuse libfuse-dev
# Example using sshfs
sshfs user@host:/remote /local/mount
# Unmount
fusermount -u /local/mount
# List available
ls /dev/fuse
cat /proc/filesystems | grep fuse

Answer:

Terminal window
# Install
apt install stratisd stratis-cli
# Start
systemctl start stratisd
# Create pool
stratis pool create mypool /dev/sdb /dev/sdc
# Create filesystem
stratis fs create mypool myfs
# Mount
mount /stratis/mypool/myfs /mnt
# List
stratis pool list
stratis fs list

Q399: How do you use VDO (Virtual Data Optimizer)?

Section titled “Q399: How do you use VDO (Virtual Data Optimizer)?”

Answer:

Terminal window
# Install
apt install vdo kmod-kvdo
# Create VDO
vdo create --name=vdo0 --device=/dev/sdb --vdoLogicalSize=100G
# Format
mkfs.xfs /dev/mapper/vdo0
# Mount
mount /dev/mapper/vdo0 /mnt
# Deduplication
vdo enableDeduplication --name=vdo0
vdo enableCompression --name=vdo0

Answer:

Terminal window
# Create cache
dmsetup create cache0 --table "0 2097152 cache /dev/sdb1 /dev/sdc1 /dev/sda3 512 1 writeback default 0"
# Check status
dmsetup status cache0
# Remove
dmsetup remove cache0

Q401: How do you configure Linux audit rules?

Section titled “Q401: How do you configure Linux audit rules?”

Answer:

Terminal window
# Monitor file changes
auditctl -w /etc/passwd -p wa -k passwd_change
auditctl -w /etc/shadow -p wa -k shadow_change
# Monitor directory
auditctl -w /var/www/html -p r -k web_access
# Monitor commands
auditctl -w /usr/bin/rm -p x -k rm_command
# List rules
auditctl -l
# Search
ausearch -k passwd_change
aureport -f

Answer:

Terminal window
# Install
apt install aide
# Initialize
aideinit
# Update after changes
aide --update
# Check
aide --check
# Configuration
# /etc/aide/aide.conf
/etc/pam.d/ NORMAL
/etc/shadow NORMAL
/var/log/ LOG
!/var/log/*.log

Q403: How do you configure Lynis security audit?

Section titled “Q403: How do you configure Lynis security audit?”

Answer:

Terminal window
# Install
apt install lynis
# Audit
lynis audit system
lynis audit system --profile custom.prf
# Quick audit
lynis quick
# Update
lynis update info

Answer:

Terminal window
# Install
apt install rkhunter
# Update
rkhunter --update
# Check
rkhunter --check
# Cron
# /etc/cron.daily/rkhunter
#!/bin/sh
rkhunter --check --skip-keypress --report-warnings-only

Answer:

Terminal window
# Install
apt install chkrootkit
# Check
chkrootkit
# Specific
chkrootkit -q
# Cron
# /etc/cron.daily/chkrootkit
#!/bin/sh
chkrootkit | grep -v "not found"

Answer:

Terminal window
# Install
apt install clamav
# Update
freshclam
# Scan
clamscan -r /home
clamscan -r --remove /home
# Daemon
systemctl start clamav-freshclam
systemctl start clamav-daemon
# On-access scanning (clamav-daemon with OnAccess* options enabled)
clamonacc

Answer:

Terminal window
# Install
apt install ossec-hids-agent
# Configure
# /var/ossec/etc/ossec.conf
<active-response>
<disabled>no</disabled>
</active-response>
# Start
/var/ossec/bin/ossec-control start
# View alerts
tail -f /var/ossec/logs/alerts/alerts.log

Answer:

Terminal window
# Install
apt install tripwire
# Initialize
twadmin --create-polfile /etc/tripwire/twpol.txt
tripwire --init
# Check
tripwire --check
# Update
tripwire --update --accept-all
# Update policy
twadmin --create-polfile /etc/tripwire/twpol.txt
tripwire --update-policy /etc/tripwire/twpol.txt

Answer:

Terminal window
# Install
apt install suricata
# Update rules
suricata-update
# Run
suricata -i eth0
suricata -c /etc/suricata/suricata.yaml -i eth0
# Log
tail -f /var/log/suricata/fast.log
# Test
suricata -t /etc/suricata/suricata.yaml

Answer:

Terminal window
# Install
apt install snort
# Configure
# /etc/snort/snort.conf
ipvar HOME_NET 192.168.1.0/24
var RULE_PATH /etc/snort/rules
# Test rules
snort -T -c /etc/snort/snort.conf
# Run
snort -i eth0 -c /etc/snort/snort.conf
# Logs
ls /var/log/snort/

Q411: How do you configure Linux audit for compliance?

Section titled “Q411: How do you configure Linux audit for compliance?”

Answer:

Terminal window
# PCI-DSS requirements
auditctl -w /etc/passwd -p wa -k pci_passwd
auditctl -w /etc/shadow -p wa -k pci_shadow
auditctl -w /var/log/ -p wa -k pci_logs
auditctl -w /etc/sudoers -p wa -k pci_sudoers
# CIS benchmarks
auditctl -w /etc/group -p wa -k group_change
auditctl -w /etc/hosts -p wa -k hosts_change
auditctl -w /etc/sysctl.conf -p wa -k sysctl_change

Answer:

Terminal window
# Install
apt install osquery
# Interactive
osqueryi
# Queries
SELECT * FROM users;
SELECT * FROM processes WHERE name = 'nginx';
SELECT * FROM listening_ports;
SELECT * FROM file_events;
# Daemon
systemctl enable osqueryd
systemctl start osqueryd

Answer:

Terminal window
# Install
apt install sysdig
# Capture
sysdig -w dump.scap
sysdig -w dump.scap -C 100 # 100MB files
# Analyze
sysdig -r dump.scap
sysdig -r dump.scap -c "topprocs_cpu"
sysdig -r dump.scap -c "topfiles_bytes"
# Live
sysdig -c topprocs_cpu
sysdig -c fdcount_by proc.name "evt.type=read"

Answer:

Terminal window
# Download
curl -L https://github.com/carlospolop/PEASS-ng/releases/latest/download/linpeas.sh -o linpeas.sh
# Run
chmod +x linpeas.sh
./linpeas.sh
# Options
./linpeas.sh -s # superfast & stealth (skip slow checks)
./linpeas.sh -o ONLY_CHECKS # run only the listed check sections
./linpeas.sh -P PASSWORD # try this password with sudo/su checks

Q415: How do you use linux-exploit-suggester?

Section titled “Q415: How do you use linux-exploit-suggester?”

Answer:

Terminal window
# Download
curl -L https://raw.githubusercontent.com/mzet-/linux-exploit-suggester/master/linux-exploit-suggester.sh -o les.sh
# Run
chmod +x les.sh
./les.sh
# With kernel version
./les.sh -k 5.4.0
# Assess userspace packages from a package listing file
./les.sh -p pkglist.txt

Answer:

Terminal window
# Install
apt install lynis
# Audit
lynis audit system
# Specific tests
lynis audit system --tests "AUTH-9262"
# Report
lynis show report
# Cron job
# /etc/cron.d/lynis
0 3 * * * root /usr/bin/lynis audit system --cronjob

Answer:

Terminal window
# Install
apt install openscap-scanner scap-security-guide
# List profiles
oscap info /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
# Scan
oscap xccdf eval --profile xccdf_org.ssgproject.content_profile_cis \
--results scan-results.xml /usr/share/xml/scap/ssg/content/ssg-ubuntu2004-ds.xml
# Generate report
oscap xccdf generate report scan-results.html scan-results.xml

Answer:

Terminal window
# Install
apt install cockpit
# Enable
systemctl enable --now cockpit.socket
# Access
# https://server:9090
# Modules
apt install cockpit-storaged
apt install cockpit-networkmanager
apt install cockpit-packagekit

Answer:

Terminal window
# Install
apt install webmin
# Access
# https://server:10000
# Modules available
# System, Servers, Networking, Hardware, Clusters, Others

Q420: How do you use Cockpit with Kubernetes?

Section titled “Q420: How do you use Cockpit with Kubernetes?”

Answer:

Terminal window
# Install
apt install cockpit-podman
# Enable
systemctl enable --now cockpit.socket
# Podman
# https://server:9090/podman
# Kubernetes
# Install kubernetes plugin
apt install cockpit-kubernetes

Q421: How do you optimize Linux for containers?

Section titled “Q421: How do you optimize Linux for containers?”

Answer:

Terminal window
# Kernel parameters
net.core.somaxconn = 1024
net.ipv4.ip_local_port_range = 1024 65535
fs.file-max = 2097152
kernel.pid_max = 4194304
# Disable swap
swapoff -a
# Transparent huge pages
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
# SELinux/AppArmor
setenforce 0

Q422: How do you optimize Linux for databases?

Section titled “Q422: How do you optimize Linux for databases?”

Answer:

/etc/sysctl.conf
# Memory
vm.swappiness = 10
vm.dirty_ratio = 15
vm.dirty_background_ratio = 5
# Network
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
# File descriptors
fs.file-max = 65535
# Kernel
kernel.shmmax = 68719476736
kernel.shmall = 4294967296
# Limits.conf
* soft nofile 65535
* hard nofile 65535

Answer:

Terminal window
# CPU governor (a redirect into a glob does not fan out to every CPU; loop over the files)
for g in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo performance > "$g"; done
# Network
# Use RDMA
modprobe mlx5_core
# or
modprobe rdma_ucm
# Filesystem
# Use Lustre, GPFS, or parallel fs
# Mount with noatime

Answer:

Terminal window
# Install
apt install tuned
# List profiles
tuned-adm list
# Active profile
tuned-adm active
# Set profile
tuned-adm profile throughput-performance
tuned-adm profile virtual-guest
# Verify
tuned-adm verify

Answer:

Terminal window
# Install
apt install ktune
# Configure
# /etc/ktune/ktune.sh
#!/bin/bash
for cpu in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
echo performance > $cpu
done
# Enable
systemctl enable ktune
systemctl start ktune

Answer:

/etc/default/grub
# Add kernel parameter
GRUB_CMDLINE_LINUX_DEFAULT="isolcpus=1,2,3"
# Update GRUB
update-grub
# Or using cgroups
# cset shield -c 1,2,3

Answer:

Terminal window
# Install (the cpuset package provides the `cset` command)
apt install cpuset
# Shield CPUs 1-3 for dedicated use
cset shield -c 1-3
# Or create a named set
cset set -c 1-3 -s myapp
# Move a process into it
cset proc -m -p PID -t myapp
# Check
cset set -l

Answer:

Terminal window
# Set CPU affinity
taskset -c 0,1 command
taskset -p 0x03 PID
# Show affinity
taskset -p PID
# Run on cores
taskset -c 0-3 myapp

Answer:

Terminal window
# Show NUMA topology
numactl --hardware
numactl --show
# Run on specific node
numactl --membind=0 command
numactl --cpunodebind=0 command
# Interleave
numactl --interleave=all command

Answer:

Terminal window
# Enable CPU
chcpu -e 1
# Disable CPU
chcpu -d 1
# List
chcpu -l

Q431: How do you configure transparent huge pages?

Section titled “Q431: How do you configure transparent huge pages?”

Answer:

Terminal window
# Check current
cat /sys/kernel/mm/transparent_hugepage/enabled
# Disable
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
# Make persistent
# /etc/rc.d/rc.local
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi

Answer:

Terminal window
# Check
cat /sys/block/sda/queue/scheduler
# Set mq-deadline (for HDD; older non-blk-mq kernels use "deadline")
echo mq-deadline > /sys/block/sda/queue/scheduler
# Set none (for NVMe/SSD)
echo none > /sys/block/sda/queue/scheduler
# Persistent
# /etc/udev/rules.d/60-ioschedulers.rules
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/scheduler}="mq-deadline"

Q433: How do you tune network for latency?

Section titled “Q433: How do you tune network for latency?”

Answer:

Terminal window
# Kernel parameters
net.ipv4.tcp_low_latency = 1
net.core.netdev_max_backlog = 1000
# Increase socket buffer
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216

Q434: How do you use BBR congestion control?

Section titled “Q434: How do you use BBR congestion control?”

Answer:

Terminal window
# Check
sysctl net.ipv4.tcp_available_congestion_control
# Enable
modprobe tcp_bbr
# Configure
sysctl -w net.core.default_qdisc=fq
sysctl -w net.ipv4.tcp_congestion_control=bbr
# Make persistent
echo "tcp_bbr" >> /etc/modules-load.d/modules.conf

Q435: How do you configure huge pages for database?

Section titled “Q435: How do you configure huge pages for database?”

Answer:

Terminal window
# Check
cat /proc/meminfo | grep Huge
# Set
sysctl -w vm.nr_hugepages=128
# Persistent
# /etc/sysctl.conf
vm.nr_hugepages = 128
# For PostgreSQL
# postgresql.conf
huge_pages = on

Answer:

Terminal window
# Enable
modprobe zswap
# Check
cat /sys/module/zswap/parameters/enabled
# Compressor
# /etc/modprobe.d/zswap.conf
options zswap compressor=lz4
# Pool
# /etc/modprobe.d/zswap.conf
options zswap zpool=z3fold

Answer:

Terminal window
# Load module
modprobe zram num_devices=2
# Configure
echo lz4 > /sys/block/zram0/comp_algorithm
echo 1G > /sys/block/zram0/disksize
# Format and mount
mkswap /dev/zram0
swapon /dev/zram0 -p 100
# Multiple devices
echo 512M > /sys/block/zram1/disksize
mkswap /dev/zram1
swapon /dev/zram1

Answer:

Terminal window
# Install
apt install earlyoom
# Configure
# /etc/default/earlyoom
EARLYOOM_ARGS="-m 10 -r 60"
# Start
systemctl enable earlyoom
systemctl start earlyoom

Answer:

Terminal window
# Check
cat /proc/sys/vm/overcommit_memory
# Disable
sysctl -w vm.overcommit_memory=2
# Adjust OOM score
echo -15 > /proc/PID/oom_score_adj
# Never kill root
sysctl -w vm.oom_dump_tasks=1

Answer:

Terminal window
# Check (mainline kernels expose no vm.pagecache knob; tune cache reclaim pressure instead)
cat /proc/sys/vm/vfs_cache_pressure
# Configure
sysctl -w vm.vfs_cache_pressure=50
# Make persistent
# /etc/sysctl.conf
vm.vfs_cache_pressure = 50

Answer:

/etc/systemd/journald.conf
[Journal]
SystemMaxUse=500M
SystemKeepFree=100M
RuntimeMaxUse=200M
RuntimeKeepFree=50M
MaxFileSec=1month
ForwardToSyslog=yes
Compress=yes
# Reload
systemctl restart systemd-journald

Answer:

Terminal window
# View all
journalctl
# Since
journalctl --since "1 hour ago"
journalctl --since "2023-01-01"
# Until
journalctl --until "2023-01-01"
# Priority
journalctl -p err
# Service
journalctl -u nginx
# Follow
journalctl -f
# Disk usage
journalctl --disk-usage

Answer:

/etc/rsyslog.conf
# Modules
$ModLoad imudp
$UDPServerRun 514
$ModLoad imtcp
$InputTCPServerRun 514
# Templates
$template RemoteLogs,"/var/log/%HOSTNAME%/%programname%.log"
# Forward
*.* @@remote-host:514
# Filter
:programname, isequal, "nginx" /var/log/nginx.log
& ~

Answer:

/etc/logrotate.d/nginx
/var/log/nginx/*.log {
daily
missingok
rotate 14
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
postrotate
[ -f /var/run/nginx.pid ] && kill -USR1 `cat /var/run/nginx.pid`
endscript
}

Answer:

Terminal window
# Using rsyslog
# Server
$ModLoad imtcp
$InputTCPServerRun 514
# Client
*.* @@server:514
# Or using ELK
# Filebeat -> Logstash -> Elasticsearch -> Kibana
# Or using Loki
# Promtail -> Loki -> Grafana

Answer:

Terminal window
# Install (recent images require a version tag and a discovery mode)
docker run -d -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:8.13.4
# Create index
curl -X PUT "localhost:9200/myindex"
# Add document
curl -X POST "localhost:9200/myindex/_doc" -H 'Content-Type: application/json' -d'{"field":"value"}'
# Search
curl -X GET "localhost:9200/myindex/_search"

Answer:

Terminal window
# Install
docker run -d -p 5601:5601 kibana
# Connect to Elasticsearch
# Kibana -> Settings -> Indices
# Create visualization
# Visualize -> Create new
# Create dashboard
# Dashboard -> Create new

Q448: How do you use Grafana with Prometheus?

Section titled “Q448: How do you use Grafana with Prometheus?”

Answer:

Terminal window
# Add datasource
# Configuration -> Data Sources -> Add Prometheus
# URL: http://prometheus:9090
# Add dashboard
# Dashboards -> Import
# JSON or ID

Answer:

Terminal window
# Install
docker run -d -p 3100:3100 grafana/loki
# Configure Promtail
# /etc/promtail/promtail.yaml
server:
http_listen_port: 9080
grpc_listen_port: 0
clients:
- url: http://loki:3100/api/prom/push
scrape_configs:
- job_name: system
static_configs:
- targets: [localhost]
labels:
job: varlogs
__path__: /var/log/*.log

Answer:

Terminal window
# Install
docker run -d -p 9093:9093 prom/alertmanager
# Configure
# /etc/alertmanager/alertmanager.yml
route:
group_by: ['alertname']
receiver: 'email'
receivers:
- name: 'email'
email_configs:
- to: 'admin@example.com'
from: 'alertmanager@example.com'
smarthost: 'smtp.example.com:587'
auth_username: 'user'
auth_password: 'pass'

Q451: How do you configure Prometheus alerting?

Section titled “Q451: How do you configure Prometheus alerting?”

Answer:

/etc/prometheus/alerts.yml
groups:
- name: alerts
rules:
- alert: HighCPU
expr: 100 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100) > 80
for: 5m
labels:
severity: warning
annotations:
summary: "High CPU usage on {{ $labels.instance }}"

Answer:

Terminal window
# Install
docker run -d -p 9115:9115 prom/blackbox-exporter
# Configure
# /etc/blackbox/blackbox.yml
modules:
http_2xx:
prober: http
timeout: 5s
# Prometheus
- job_name: blackbox
metrics_path: /probe
params:
module: [http_2xx]
static_configs:
- targets:
- https://example.com
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance

Answer:

Terminal window
# Install
docker run -d -p 9100:9100 prom/node-exporter
# Collectors
# --collector.cpu
# --collector.meminfo
# --collector.diskstats
# --collector.netdev
# Textfile collector
# --collector.textfile.directory=/var/lib/node_exporter/textfile_collector

Answer:

Terminal window
# Install
docker run -d \
--volume=/:/rootfs:ro \
--volume=/var/run:/var/run:ro \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:ro \
--publish=8080:8080 \
gcr.io/cadvisor/cadvisor
# Access
# http://localhost:8080

Answer:

Terminal window
# Install
docker run -d -p 9091:9091 prom/pushgateway
# Push metrics
echo "metric_name 3.14" | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job
# Delete
curl -X DELETE http://pushgateway:9091/metrics/job/some_job

Answer:

Terminal window
# Install
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/master/bundle.yaml
# Create ServiceMonitor
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: myapp
spec:
selector:
matchLabels:
app: myapp
endpoints:
- port: web

Answer:

Terminal window
# Sidecar
# In Prometheus
external_labels:
cluster: 'cluster1'
replica: 'a'
thanos:
sidecar:
tsdb: /prometheus
grpc-address: :10901
http-address: :10902

Answer:

docker-compose.yml
cortex:
image: cortexproject/cortex:latest
ports:
- "9009:9009"
- "9091:9091"
command:
- -config.file=/etc/cortex/cortex.yaml

Answer:

Terminal window
# Install
docker run -d -p 9009:9009 -p 9091:9091 grafana/mimir
# Configure
# /etc/mimir/config.yaml
schema:
configs:
- from: 2023-01-01
store: tsdb
object_store: files
schema: v11
index:
prefix: index_
period: 168h

Answer:

Terminal window
# Install (single-node server)
docker run -d -p 8428:8428 victoriametrics/victoria-metrics
# With persistent storage
docker run -d -p 8428:8428 -v /victoria-data:/victoria-data victoriametrics/victoria-metrics
# Cluster (run the separate vminsert/vmselect/vmstorage component images)
docker run -d -p 8480:8480 victoriametrics/vminsert

Answer:

Terminal window
# Install
apt install goaccess
# Real-time
goaccess /var/log/nginx/access.log -o /var/www/html/report.html --real-time-html
# Generate static
goaccess /var/log/nginx/access.log -o report.html

Answer:

Terminal window
# Download logs
aws s3 cp s3://bucket/logs/ ./logs/
# Analyze
goaccess access.log*.log -a -o report.html
# With GeoIP
goaccess access.log -a -o report.html --geoip-database=GeoLite2-City.mmdb

Answer:

Terminal window
# Install
apt install analog
# Configure
# /etc/analog.cfg
LOGFILE /var/log/apache2/access.log
OUTFILE /var/www/html/analog.html
HOSTNAME myserver.com

Answer:

Terminal window
# Install
apt install awstats
# Configure
# /etc/awstats/awstats.mysite.conf
SiteName="mysite.com"
LogFile="/var/log/apache2/access.log"
DirData="/var/lib/awstats"
# Generate
awstats_updateall.pl now
awstats.pl -config=mysite -output -staticlinks > /var/www/html/awstats.html

Answer:

Terminal window
# Install via Docker
docker run -d -p 8080:80 \
-v /opt/matomo:/var/www/html \
-v /opt/matomo:/var/lib/mysql \
matomo
# Configure web server
# Add tracking code to pages
<script>
var _paq = window._paq = window._paq || [];
_paq.push(['trackPageView']);
_paq.push(['enableLinkTracking']);
(function() {
var u="//matomo.example.com/";
_paq.push(['setTrackerUrl', u+'matomo.php']);
_paq.push(['setSiteId', '1']);
var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
g.type='text/javascript'; g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
})();
</script>

Answer:

Terminal window
# Install nfdump
apt install nfdump nfcapd
# Collect (nfcapd listens for NetFlow on UDP; export flows from an interface with softflowd/fprobe)
nfcapd -w -D -p 9995 -l /var/cache/netflow
# Analyze
nfdump -r nfcapd.202301011300 -a -s ip/bytes
# Filter
nfdump -r nfcapd.202301011300 'src ip 192.168.1.0/24'

Answer:

Terminal window
# Install
apt install sflowtool
# Configure switch
# sflow collector <ip> <port>
# Analyze
sflowtool -l eth0
sflowtool -r sflow.dat -json

Answer:

Terminal window
# Install
apt install ipfixprobe
# Capture
# Configure exporter
# Analyze
# Use elasticsearch or other collectors

Answer:

Terminal window
# Install
apt install zeek
# Run
zeek -i eth0
# Logs
ls /var/log/zeek/current/
# Analysis
zeek-cut < conn.log | awk '{print $5}' | sort | uniq -c | sort -rn

Answer:

Terminal window
# Install
# Using Docker
docker run -d --name moloch \
-e ELASTICSEARCH=http://localhost:9200 \
-p 8000:8000 \
-p 9200:9200 \
aircan/moloch
# Configure capture
# /data/moloch/etc/config.ini

Answer:

Terminal window
# Install
apt install ntopng
# Configure
# /etc/ntopng/ntopng.conf
-G=/var/run/ntopng.pid
-i=eth0
-w=3000
# Start
systemctl start ntopng
# Access
# http://localhost:3000

Answer:

Terminal window
# Install
apt install ntop
# Run
ntop -i eth0
# Access
# http://localhost:3000

Answer:

Terminal window
# Install
apt install darkstat
# Configure
# /etc/darkstat/init.cfg
INTERFACE="-i eth0"
START_DAEMON=1
# Start
systemctl start darkstat

Answer:

Terminal window
# Install
apt install bandwidthd
# Configure
# /etc/bandwidthd/bandwidthd.conf
subnet 192.168.1.0/24
# Start
systemctl start bandwidthd

Answer:

Terminal window
# Install
apt install vnstat
# Configure
# /etc/vnstat.conf
Interface "eth0"
# Start
systemctl start vnstat
# View
vnstat
vnstat -h
vnstat -l
vnstat -m

Answer:

Terminal window
# Server
iperf3 -s
# Client
iperf3 -c server-ip
# Test bandwidth
iperf3 -c server-ip -P 4 # parallel
# UDP test
iperf3 -c server-ip -u -b 1G

Answer:

Terminal window
# Install
apt install tcpreplay
# Replay pcap
tcpreplay -i eth0 capture.pcap
# Loop
tcpreplay -i eth0 -l 10 capture.pcap
# Speed
tcpreplay -i eth0 -M 10 capture.pcap

Answer:

Terminal window
# Install
apt install netsniff-ng
# Capture
netsniff-ng -i eth0 -o capture.pcap
# Replay
netsniff-ng -i eth0 -r capture.pcap
# BPF filter
netsniff-ng -i eth0 -f "tcp port 80" -o http.pcap

Answer:

Terminal window
# Install
# Download from ntop.org
# Load module
modprobe pf_ring
# Check
cat /proc/net/pf_ring/info
# Use with applications
# tcpdump -i eth0

Answer:

Terminal window
# Install
# From dpdk.org
# Setup huge pages
echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
# Bind NIC
dpdk-devbind.py --bind igb_uio 0000:01:00.0
# Run testpmd
./build/app/testpmd -l 0-3 -n 4 -- -i

Questions 481-1000 continue in next file…