Skip to content

Linux Practical Interview Questions (501–750)

Linux Practical Interview Questions (501-750)

Section titled “Linux Practical Interview Questions (501-750)”

Answer:

Terminal window
# Create users from file
while IFS=: read -r user pass uid gid info home shell; do
useradd -u "$uid" -g "$gid" -d "$home" -s "$shell" -c "$info" "$user"
done < users.txt
# Password in bulk
for user in user1 user2 user3; do
echo "$user:password" | chpasswd
done
# CSV import
# username,uid,gid,info,home,shell,password

Answer:

Terminal window
# Create groups
for group in group1 group2 group3; do
groupadd "$group"
done
# Add users to group
gpasswd -M user1,user2,user3 groupname
# Add users one by one
for user in user1 user2; do
usermod -aG groupname "$user"
done

Q483: How do you set up password policies?

Section titled “Q483: How do you set up password policies?”

Answer:

/etc/login.defs
# /etc/login.defs settings (shadow password suite, not PAM)
PASS_MAX_DAYS 90
PASS_MIN_DAYS 1
PASS_MIN_LEN 12
PASS_WARN_AGE 7
# Or using chage
# Set password expiration
chage -M 90 username
chage -m 1 username
chage -W 7 username
# View
chage -l username

Answer:

Terminal window
# Edit sudoers
visudo
# Grant all privileges
username ALL=(ALL:ALL) ALL
# Passwordless
username ALL=(ALL) NOPASSWD: ALL
# Specific commands
username ALL=(ALL) /bin/ls, /bin/kill
# Group
%sudo ALL=(ALL:ALL) ALL

Answer:

Terminal window
# Lock account
usermod -L username
# or
passwd -l username
# Unlock
usermod -U username
# or
passwd -u username
# Check status
passwd -S username

Q486: How do you set up SSH key management?

Section titled “Q486: How do you set up SSH key management?”

Answer:

Terminal window
# Generate key
ssh-keygen -t ed25519 -C "user@host"
# Copy to server
ssh-copy-id user@server
# Restrict keys
# ~/.ssh/authorized_keys
from="192.168.1.10",command="/bin/ls" ssh-ed25519 AAAA...
# Force command
command="git-shell" ssh-ed25519 AAAA...

Q487: How do you configure SSH agent forwarding?

Section titled “Q487: How do you configure SSH agent forwarding?”

Answer:

Terminal window
# Start agent
ssh-agent bash
# Add key
ssh-add
# Forward agent
# ~/.ssh/config
Host server
ForwardAgent yes
# Use
ssh -A user@server

Answer:

Terminal window
# Local port forward
ssh -L 8080:localhost:80 user@server
# Remote port forward
ssh -R 8080:localhost:80 user@server
# Dynamic SOCKS proxy
ssh -D 8080 user@server
# Keep alive
ssh -o ServerAliveInterval=60 user@server

Q489: How do you configure rsync over SSH?

Section titled “Q489: How do you configure rsync over SSH?”

Answer:

Terminal window
# With SSH key
rsync -avz -e "ssh -i /path/to/key" source/ user@server:/dest/
# With password
rsync -avz -e "sshpass -p password ssh" source/ user@server:/dest/
# With specific port
rsync -avz -e "ssh -p 2222" source/ user@server:/dest/

Answer:

Terminal window
# With identity file
scp -i /path/to/key file user@server:/path/
# With specific cipher
scp -c aes256-gcm@openssh.com file user@server:/path/
# Recursive
scp -r directory/ user@server:/path/
# With compression
scp -C file user@server:/path/

Answer:

/etc/auto.master
/- /etc/auto.direct
# /etc/auto.direct
/mnt/nfs -rw,soft,intr server:/share
# Start
systemctl enable autofs
systemctl start autofs
# Test
ls /mnt/nfs

Q492: How do you configure autofs with LDAP?

Section titled “Q492: How do you configure autofs with LDAP?”

Answer:

/etc/auto.master
# Install
apt install autofs-ldap
/home ldap ldap.example.com:/home/&
# /etc/ldap.conf
uri ldap://ldap.example.com
base dc=example,dc=com

Answer:

Terminal window
# Install
apt install libnss-ldap libpam-ldap
# Configure
# /etc/nsswitch.conf
passwd: compat ldap
group: compat ldap
shadow: compat ldap
# PAM
# /etc/pam.d/common-session
session optional pam_mkhomedir.so skel=/etc/skel umask=077

Answer:

Terminal window
# Install
apt install sssd realmd
# Configure
# /etc/sssd/sssd.conf
[sssd]
services = nss, pam
domains = LDAP
[domain/LDAP]
id_provider = ldap
auth_provider = ldap
ldap_uri = ldap://ldap.example.com
ldap_search_base = dc=example,dc=com
# Enable
systemctl enable sssd

Q495: How do you integrate with Active Directory?

Section titled “Q495: How do you integrate with Active Directory?”

Answer:

Terminal window
# Install
apt install realmd sssd adcli
# Join domain
realm join --user=admin example.com
# Check
realm list
id user@EXAMPLE.COM
# Login
login user@EXAMPLE.COM

Answer:

/etc/krb5.conf
# Install
apt install krb5-user
[libdefaults]
default_realm = EXAMPLE.COM
[realms]
EXAMPLE.COM = {
kdc = kdc.example.com
admin_server = kdc.example.com
}
# Get ticket
kinit admin@EXAMPLE.COM
klist
kdestroy

Answer:

/etc/sssd/sssd.conf
# Configure
[sssd]
cache_credentials = true
entry_cache_timeout = 600
# Clear cache
sss_cache -E
# Debug
sss_debuglevel 9
# Check
getent passwd user@domain

Answer:

/etc/samba/smb.conf
# Install
apt install winbind libpam-winbind
[global]
workgroup = EXAMPLE
security = ads
realm = EXAMPLE.COM
winbind enum users = yes
winbind enum groups = yes
# Join domain
net ads join -U admin
# Enable
systemctl enable winbind

Answer:

Terminal window
# Configure authentication
authconfig --enableshadow --enablemd5 \
--ldapserver=ldap.example.com \
--ldapbasedn="dc=example,dc=com" \
--update
# With Kerberos
authconfig --enablekrb5 \
--krb5realm=EXAMPLE.COM \
--krb5kdc=kdc.example.com \
--update

Answer:

/etc/ldap/slapd.conf
# Master config
serverID 1
syncrepl rid=001
provider=ldap://ldap2.example.com
bindmethod=simple
binddn="cn=admin,dc=example,dc=com"
credentials=secret
searchbase="dc=example,dc=com"
type=refreshAndPersist

Answer:

Terminal window
# Create linear mapping
echo "0 100000 linear /dev/sdb1 0" | dmsetup create vol1
# Remove
dmsetup remove vol1
# Status
dmsetup status
dmsetup table

Q502: How do you configure dm-crypt with LUKS?

Section titled “Q502: How do you configure dm-crypt with LUKS?”

Answer:

Terminal window
# Create
cryptsetup luksFormat /dev/sdb1
# Open
cryptsetup open /dev/sdb1 crypt_vol
# Format
mkfs.xfs /dev/mapper/crypt_vol
# Mount
mount /dev/mapper/crypt_vol /mnt
# Close
umount /mnt
cryptsetup close crypt_vol

Answer:

Terminal window
# Install
apt install veracrypt
# Create volume
veracrypt -c
# Mount
veracrypt /path/to/container /mnt/veracrypt
# Dismount
veracrypt -d /path/to/container

Answer:

Terminal window
# Generate key
gpg --full-generate-key
# Encrypt
gpg -e -r recipient@example.com file.txt
# Decrypt
gpg -d file.txt.gpg
# Symmetric
gpg -c file.txt
gpg -o file.txt.gpg -c file.txt

Answer:

Terminal window
# Install
go install filippo.io/age/cmd/age@latest
# Generate keys
age-keygen
# Encrypt
age -p -o file.txt.age file.txt
# Decrypt
age -d -i key.txt file.txt.age

Answer:

Terminal window
# Format: data device + hash device; note the root hash it prints
veritysetup format /dev/sda1 /dev/sda2 | tee verity_params
# Open (activate): data device, mapping name, hash device, root hash
veritysetup open /dev/sda1 verity_root /dev/sda2 <root_hash>
# Check
veritysetup verify /dev/sda1 /dev/sda2 <root_hash>

Answer:

Terminal window
# Zero target (returns zeros on read, discards writes) — table: start length target
echo "0 204800 zero" | dmsetup create zero0
# Error target (fails all I/O — useful for failure testing)
echo "0 204800 error" | dmsetup create error0
# Use for testing
dd if=/dev/mapper/zero0 of=/dev/null bs=1M count=100

Answer:

Terminal window
# Create RAID 6
mdadm --create /dev/md0 --level=6 --raid-devices=4 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1
# With spare
mdadm --create /dev/md0 --level=6 --raid-devices=4 --spare-devices=1 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1 /dev/sdf1
# Check
cat /proc/mdstat

Answer:

Terminal window
# Create RAID 10
mdadm --create /dev/md0 --level=10 --raid-devices=4 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1
# Layout
# n = near (default)
# f = far
# o = offset
mdadm --create /dev/md0 --level=10 --raid-devices=4 --layout=n2 /dev/sdb1 /dev/sdc1 /dev/sdd1 /dev/sde1

Answer:

Terminal window
# Convert RAID1 to RAID5 (add the new disk first, then grow)
mdadm --add /dev/md0 /dev/sdc1
mdadm --grow /dev/md0 --level=5 --raid-devices=3
# Convert to larger disk
mdadm --manage /dev/md0 --fail /dev/sdb1
mdadm --manage /dev/md0 --replace /dev/sdb1

Answer:

Terminal window
# Check status
cat /proc/mdstat
# Detailed
mdadm --detail /dev/md0
# Monitor
mdadm --monitor --daemonise --mail=admin@example.com /dev/md0
# Log rotation
# /etc/logrotate.d/mdadm
/var/log/mdadm.log {
weekly
rotate 4
}

Answer:

Terminal window
# Add bitmap
mdadm --grow /dev/md0 --bitmap=internal
# Add external bitmap
mdadm --grow /dev/md0 --bitmap=/boot/md0.bitmap
# Remove bitmap
mdadm --grow /dev/md0 --bitmap=none

Q513: How do you recover from RAID failure?

Section titled “Q513: How do you recover from RAID failure?”

Answer:

Terminal window
# Check
mdadm --detail /dev/md0
# Identify failed
mdadm --examine /dev/sdb1
# Remove failed
mdadm /dev/md0 --remove /dev/sdb1
# Add new
mdadm /dev/md0 --add /dev/sdf1
# Rebuild
# Automatic

Answer:

Terminal window
# Create cache pool
lvcreate --type cache-pool -L 10G -n cache_pool vg_ssd
# Create cached LV
lvcreate --type cache -L 10G --cachepool cache_pool -n cached_lv vg_hdd

Answer:

Terminal window
# Backup
vgcfgbackup
# View backup
ls -l /etc/lvm/backup/
# Restore
vgcfgrestore vg_name
# Automated backup
# /etc/lvm/lvm.conf
backup {
backup = 1
backup_dir = "/etc/lvm/backup"
retain_min = 10
retain_days = 30
}

Answer:

Terminal window
# Create new PV
pvcreate /dev/sdc1
# Add to VG
vgextend vg_name /dev/sdc1
# Move data
pvmove /dev/sdb1 /dev/sdc1
# Remove old
vgreduce vg_name /dev/sdb1

Answer:

Terminal window
# Create thin pool
lvcreate -L 20G --thinpool thin_pool vg
# Create thin LV
lvcreate -V 100G --thin -n thin_lv vg/thin_pool
# Activate
lvchange -ay vg/thin_lv

Q518: How do you use LVM snapshots for backups?

Section titled “Q518: How do you use LVM snapshots for backups?”

Answer:

Terminal window
# Create snapshot
lvcreate -s -L 5G -n backup_snap /dev/vg/lv
# Mount snapshot
mount -o ro /dev/vg/backup_snap /mnt/snap
# Backup
tar -czf /backup/backup.tar.gz -C /mnt/snap .
# Remove snapshot
lvremove /dev/vg/backup_snap

Answer:

Terminal window
# Initial backup
btrfs send /mnt/snap1 | gzip > backup1.gz
# Incremental
btrfs send -p /mnt/snap1 /mnt/snap2 | gzip > backup2.gz
# Receive
gzip -d -c backup2.gz | btrfs receive /backup/

Answer:

Terminal window
# Full balance
btrfs balance start /mnt/btrfs
# Specific profile
btrfs balance start -dconvert=raid1 -mconvert=raid1 /mnt/btrfs
# With filters
btrfs balance start -dusage=50 /mnt/btrfs
# Status
btrfs balance status /mnt/btrfs

Answer:

Terminal window
# Convert to RAID1
btrfs balance start -dconvert=raid1 -mconvert=raid1 /mnt/btrfs
# Add device
btrfs device add /dev/sdc1 /mnt/btrfs
# Remove device
btrfs device remove /dev/sdb1 /mnt/btrfs

Answer:

Terminal window
# Install
apt install zfs-dkms zfsutils-linux
# Create pool
zpool create -f pool1 mirror /dev/sdb1 /dev/sdc1
# Create filesystem
zfs create pool1/data
# Set properties
zfs set compression=lz4 pool1/data
zfs set quota=10G pool1/data

Answer:

zfs snapshot pool1/data@snap1

zfs list -t snapshot

zfs rollback pool1/data@snap1

zfs clone pool1/data@snap1 pool1/clone

### Q524: How do you use zfs send/receive?
**Answer:**
```bash
# Full send
zfs send pool1/data@snap1 | ssh host "zfs receive pool1/data"
# Incremental
zfs send -i pool1/data@snap1 pool1/data@snap2 | ssh host "zfs receive pool1/data"
# Compressed
zfs send pool1/data@snap1 | gzip | ssh host "gunzip | zfs receive pool1/data"

Answer:

Terminal window
# Enable dedup
zfs set dedup=on pool1/data
# Check dedup ratio
zpool status -D pool1
# With specific algorithm
zfs set dedup=sha256,verify pool1/data

Answer:

Terminal window
# Create encrypted
zfs create -o encryption=on -o keyformat=passphrase pool1/encrypted
# Load key (prompts for passphrase)
zfs load-key pool1/encrypted
zfs load-key -a # all datasets
# Unload
zfs unload-key pool1/encrypted

Answer:

Terminal window
# Discover
iscsiadm -m discovery -t st -p 192.168.1.10
# Login
iscsiadm -m node -T iqn.name -p 192.168.1.10 -l
# Logout
iscsiadm -m node -T iqn.name -p 192.168.1.10 -u
# Auto-login
iscsiadm -m node -T iqn.name -p 192.168.1.10 --op update -n node.startup -v automatic

Answer:

Terminal window
# Start
targetcli
# Create backstore
/backstores/block create name=block1 dev=/dev/sdb
# Create iSCSI target
/iscsi create iqn.2023-01.com.example:storage
# Create LUN
/iscsi/iqn.../tpg1/luns create /backstores/block/block1
# Create ACL
/iscsi/iqn.../tpg1/acls create iqn.2023-01.com.example:client
# Save
saveconfig
exit

Answer:

Terminal window
# List
multipath -ll
# Add path
multipath -a /dev/sdb
# Remove
multipath -d /dev/sdb
# Format
# /etc/multipath.conf
multipaths {
"3600605b00e0c960018e0c95c00000000" {
alias mpath0
}
}

Answer:

/etc/iscsi/iscsid.conf
# Configure session
node.session.nr_sessions = 16
node.session.initial_login_retry_max = 8
# Create sessions
# Multiple connections
iscsiadm -m node -T iqn.name -p ip1 --op update -n node.conn[0].iscsi.MaxRecvDataSegmentLength=262144

Answer:

Terminal window
# Show settings
ethtool -i eth0
# Pause
ethtool -A eth0 rx on tx on
# Coalesce
ethtool -C eth0 rx-usecs 100 tx-usecs 100
# Channels
ethtool -L eth0 combined 4
# Offloads
ethtool -K eth0 tso on gso on gro on

Q532: How do you configure network bonding modes?

Section titled “Q532: How do you configure network bonding modes?”

Answer:

/sys/class/net/bond0/bonding/mode
# Mode 0 (Round-robin)
echo 0 > /sys/class/net/bond0/bonding/mode
# Mode 1 (Active-backup)
echo 1 > /sys/class/net/bond0/bonding/mode
# Mode 4 (LACP)
echo 4 > /sys/class/net/bond0/bonding/mode
# Monitoring
echo 100 > /sys/class/net/bond0/bonding/miimon

Answer:

Terminal window
# Create team
teamd -o -n -g team0 -d '{"runner": {"name": "lacp"}}'
# Add ports
teamdctl team0 port add eth1
teamdctl team0 port add eth2
# Configure IP
ip addr add 192.168.1.10/24 dev team0
# Monitor
teamdctl team0 state

Answer:

Terminal window
# keepalived config
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
virtual_ipaddress {
192.168.1.100 dev eth0
}
track_interface {
eth0
}
}

Answer:

Terminal window
# Install
apt install bird
# Configure
# /etc/bird/bird.conf
protocol static {
route 10.0.0.0/8 via 192.168.1.1;
}
protocol direct {
interface "eth*";
}
protocol kernel {
learn;
export all;
}

Answer:

Terminal window
# Install
apt install frr
# Enable
# /etc/frr/daemons
ospfd=yes
# Configure
# /etc/frr/ospfd.conf
router ospf
network 192.168.1.0/24 area 0

Answer:

Terminal window
# Install
apt install quagga
# Configure
# /etc/quagga/zebra.conf
interface eth0
ip address 192.168.1.10/24
# /etc/quagga/ospfd.conf
router ospf
network 192.168.1.0/24 area 0

Answer:

Terminal window
# Install
apt install bird2
# Configure
# /etc/bird/bird.conf
router id 192.168.1.10;
protocol static {
route 10.0.0.0/8 via 192.168.1.1;
}
protocol bgp peer1 {
local as 65000;
neighbor 192.168.1.20 as 65001;
import all;
export all;
}

Answer:

Terminal window
# Bird config
protocol bgp Peering {
local as 65000;
neighbor 192.168.1.20 as 65001;
import all;
export all;
add paths tx;
add paths rx;
}

Answer:

Terminal window
# Bird config
protocol ospf MyOSPF {
tick 2;
area 0 {
interface "eth*" {
type broadcast;
};
};
}

Answer:

Terminal window
# Create table
nft add table inet filter
# Add chain
nft add chain inet filter input { type filter hook input priority 0; }
# Add rule
nft add rule inet filter input ct state established,related accept
# NAT
nft add table ip nat
nft add chain ip nat postrouting { type nat hook postrouting priority 100; }
nft add rule ip nat postrouting oifname "eth0" masquerade

Answer:

Terminal window
# View connections
conntrack -L
conntrack -L -p tcp
# Count
conntrack -C
# Delete
conntrack -D -p tcp --dport 80
# NAT
conntrack -L -n | grep NAT

Answer:

Terminal window
# List
ebtables -L
# Bridge filter
ebtables -A FORWARD -p IPv4 --ip-proto tcp --ip-dport 80 -j DROP
# NAT
ebtables -t nat -A PREROUTING -i eth0 -j redirect --redirect-target ACCEPT

Answer:

Terminal window
# Add htb qdisc
tc qdisc add dev eth0 root handle 1: htb default 10
# Add class
tc class add dev eth0 parent 1: classid 1:10 htb rate 100mbit ceil 100mbit
# Add filter
tc filter add dev eth0 parent 1: protocol all prio 1 u32 match ip src 192.168.1.0/24 flowid 1:10

Answer:

Terminal window
# Rate limit
tc qdisc add dev eth0 root tbf rate 10mbit burst 15k latency 50ms
# Delay
tc qdisc add dev eth0 root netem delay 100ms
# Loss
tc qdisc add dev eth0 root netem loss 10%
# Reorder
tc qdisc add dev eth0 root netem delay 100ms reorder 25%

Answer:

Terminal window
# Install
apt install ipp2p
# Block P2P
iptables -A FORWARD -m ipp2p --ipp2p -j DROP
# With conntrack
iptables -A FORWARD -m layer7 --l7proto bittorrent -j DROP

Answer:

Terminal window
# Mark packets
iptables -A INPUT -j NFQUEUE --queue-num 0
# Process with Python
#!/usr/bin/env python3
from netfilterqueue import NetfilterQueue
def callback(pkt):
    print(pkt.get_payload())
    pkt.accept()
nfq = NetfilterQueue()
nfq.bind(0, callback)
try:
    nfq.run()
finally:
    nfq.unbind()

Answer:

Terminal window
# Install
apt install iftop
# Usage
iftop
iftop -i eth0
iftop -i eth0 -n # no DNS
iftop -i eth0 -B # bytes

Answer:

Terminal window
# Install
apt install nethogs
# Usage
nethogs
nethogs eth0
nethogs -d 2 # refresh
nethogs -p # promiscuous

Answer:

Terminal window
# Install
apt install bmon
# Usage
bmon
bmon -p eth0
bmon -b -o interactive

Answer:

Terminal window
# Install
apt install vnstat
# Configure
vnstat -u -i eth0
# View
vnstat
vnstat -h
vnstat -d
vnstat -m
vnstat -l

Answer:

Terminal window
# Install
apt install iptraf-ng
# Usage
iptraf-ng
iptraf-ng -i all
iptraf-ng -i eth0

Answer:

Terminal window
# Install
apt install pktstat
# Usage
pktstat -i eth0
pktstat -i eth0 -T # show times
pktstat -i eth0 -w # wide

Answer:

Terminal window
# Install
apt install slurm
# Usage
slurm -i eth0
slurm -i eth0 -s # split TX/RX

Answer:

Terminal window
# Install
apt install trafshow
# Usage
trafshow -i eth0
trafshow -i eth0 -c tcp

Answer:

Terminal window
# Enable nested
modprobe kvm_intel nested=1
# or
modprobe kvm_amd nested=1
# Check
cat /sys/module/kvm_intel/parameters/nested
# In VM
# CPU model
<cpu mode='host-passthrough'/>

Q557: How do you configure KVM CPU pinning?

Section titled “Q557: How do you configure KVM CPU pinning?”

Answer:

Terminal window
# Pin to cores
virsh vcpupin domain 0 0,1,2,3
# Pin with emulator
virsh emulatorpin domain 0 0-3
# NUMA
virsh numatune domain --mode preferred --nodeset 0-1

Answer:

Terminal window
# Enable huge pages
echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
# VM config
# Memory
<memory unit='KiB'>4194304</memory>
<currentMemory unit='KiB'>4194304</currentMemory>
<hugepages>
<page size='2048' unit='KiB'/>
</hugepages>

Answer:

Terminal window
# Enable SR-IOV
echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
# Create VF
# Check
lspci | grep -i virtual
# Assign to VM
virsh attach-interface --domain vm --type hostdev --source 0000:01:00.2 --managed --config

Answer:

/etc/default/grub
# Enable IOMMU
GRUB_CMDLINE_LINUX_DEFAULT="intel_iommu=on"
# Update
update-grub
# Reboot
# Attach
virsh attach-device vm /etc/libvirt/qemu/pci.xml --persistent
# pci.xml
<hostdev mode='subsystem' type='pci' managed='yes'>
<source>
<address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
</source>
</hostdev>

Answer:

Terminal window
# Create secret
virsh secret-define secret.xml
# Set value
virsh secret-set-value UUID base64value
# Use in VM
# disk
<secret type='passphrase'>
<uuid>uuid</uuid>
</secret>

Q562: How do you configure KVM live migration?

Section titled “Q562: How do you configure KVM live migration?”

Answer:

Terminal window
# Shared storage
# NFS or similar
# Allow migration
# /etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
auth_tcp = "sasl"
# Migrate
virsh migrate --live --persistent vm qemu+tcp://dest/system
# With compression
virsh migrate --live --compressed vm qemu+tcp://dest/system

Answer:

Terminal window
# Create snapshot
virsh snapshot-create-as vm --name snap1
# List
virsh snapshot-list vm
# Revert
virsh snapshot-revert vm snap1
# Delete
virsh snapshot-delete vm snap1

Q564: How do you configure KVM memory ballooning?

Section titled “Q564: How do you configure KVM memory ballooning?”

Answer:

Terminal window
# Enable in VM
<memballoon model='virtio'>
</memballoon>
# Dynamic adjustment
virsh setmem vm 2G --config
virsh setmaxmem vm 4G --config

Answer:

Terminal window
# Load module
modprobe vhost_net
# Enable in VM
<interface type='network'>
<model type='virtio'/>
<driver name='vhost'/>
</interface>

Answer:

Terminal window
# Install
vagrant plugin install vagrant-libvirt
# Vagrantfile
Vagrant.configure("2") do |config|
config.vm.box = "generic/ubuntu2204"
config.vm.provider "libvirt" do |lv|
lv.memory = 2048
lv.cpus = 2
end
end
# Commands
vagrant up --provider=libvirt
vagrant destroy

Answer:

Terminal window
# Install
apt install virt-manager
# Run
virt-manager
# Connect to remote
# File -> Add Connection
# QEMU/KVM over SSH

Answer:

/etc/default/grub
# Enable console in VM
GRUB_CMDLINE_LINUX_DEFAULT="console=tty0 console=ttyS0,115200"
# Connect
virsh console vm
# Exit
# Ctrl+]

Answer:

Terminal window
# Download cloud image
wget https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img
# Create VM with cloud-init
virt-install --name vm1 \
--memory 2048 --vcpu 2 \
--disk jammy-server-cloudimg-amd64.img,format=qcow2 \
--cloud-init root-password-generate=on
# Or with manual
# genisoimage -o seed.iso -volid cidata -joliet -rock user-data meta-data

Answer:

Terminal window
# Mount
guestmount -a disk.img -m /dev/sda1 /mnt
# List files
virt-ls -a disk.img /
# Edit
virt-edit -a disk.img /etc/passwd
# Copy in/out
virt-copy-in -a disk.img file.txt /root/
virt-copy-out -a disk.img /root/file.txt ./
# Cat
virt-cat -a disk.img /etc/passwd

Q571: How do you use Docker user namespace remapping?

Section titled “Q571: How do you use Docker user namespace remapping?”

Answer:

/etc/docker/daemon.json
# Enable in daemon
{
"userns-remap": "default"
}
# Or custom
{
"userns-remap": "myuser"
}
# Create user
useradd -r -u 100000 -g dockremap dockremap
# Configure subuid/subgid
usermod -v 100000-165536 -w 100000-165536 dockremap

Answer:

Terminal window
# Install
apt install docker-ce-rootless-extras
# Setup
dockerd-rootless-setuptool.sh install
# Start
systemctl --user enable docker
systemctl --user start docker
# Use
docker run nginx

Q573: How do you configure Docker storage driver?

Section titled “Q573: How do you configure Docker storage driver?”

Answer:

/etc/docker/daemon.json
{
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}

Answer:

Terminal window
# Enable
export DOCKER_BUILDKIT=1
# In daemon
# /etc/docker/daemon.json
{
"features": {
"buildkit": true
}
}
# Build
docker build --progress=plain .

Answer:

Terminal window
# Build with cache
docker build .
# No cache
docker build --no-cache .
# Cache from
docker build --cache-from image:tag .
# Prune cache
docker builder prune

Q576: How do you use Docker multi-stage builds?

Section titled “Q576: How do you use Docker multi-stage builds?”

Answer:

# Build stage
FROM golang:1.20 AS builder
WORKDIR /app
COPY . .
RUN go build -o main .
# Runtime stage
FROM alpine:3.18
COPY --from=builder /app/main /app/main
CMD ["/app/main"]

Answer:

Terminal window
# Create secret
echo "password" | docker secret create db_password -
# Use in service
docker secret ls
docker secret inspect db_password
# docker-compose.yml
secrets:
db_password:
external: true
# Or
secrets:
db_password:
file: ./db_password.txt

Answer:

Terminal window
# Create config
docker config create nginx_config nginx.conf
# Use
docker config ls
# In service
configs:
- source: nginx_config
target: /etc/nginx/nginx.conf
mode: 0444

Q579: How do you configure Docker healthchecks?

Section titled “Q579: How do you configure Docker healthchecks?”

Answer:

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD curl -f http://localhost/ || exit 1

Q580: How do you use Docker logging drivers?

Section titled “Q580: How do you use Docker logging drivers?”

Answer:

Terminal window
# JSON file
docker run --log-driver json-file --log-opt max-size=10m --log-opt max-file=3 nginx
# Syslog
docker run --log-driver syslog --log-opt syslog-address=tcp://localhost:514 nginx
# Fluentd
docker run --log-driver fluentd --log-opt fluentd-address=localhost:24224 nginx

Q581: How do you use Docker resource limits?

Section titled “Q581: How do you use Docker resource limits?”

Answer:

Terminal window
# Memory
docker run -m 512m nginx
# CPU
docker run --cpus=0.5 nginx
# Blkio
docker run --device-read-bps /dev/sda:50mb nginx
# Restart policy
docker run --restart=unless-stopped nginx

Answer:

Terminal window
# Create network
docker network create mynet
# Overlay
docker network create -d overlay mynet
# Use network
docker run --network mynet nginx
# Static IP
docker network connect --ip 10.0.0.10 mynet container

Answer:

Terminal window
# Named volume
docker volume create myvol
docker run -v myvol:/data nginx
# Bind mount
docker run -v $(pwd):/data nginx
# tmpfs
docker run --tmpfs /tmp nginx
# NFS
docker volume create --driver local \
-o type=nfs \
-o o=addr=192.168.1.10,rw \
-o device=:/path nfsvol

Q584: How do you use Docker compose profiles?

Section titled “Q584: How do you use Docker compose profiles?”

Answer:

version: '3.9'
services:
web:
image: nginx
db:
image: mysql
profiles: ["database"]
admin:
image: adminer
profiles: ["admin"]

Q585: How do you use Docker compose extends?

Section titled “Q585: How do you use Docker compose extends?”

Answer:

docker-compose.base.yml
services:
web:
build: .
environment:
- ENV=production
# docker-compose.yml
services:
web:
extends:
file: docker-compose.base.yml
service: web
ports:
- "80:80"

Q586: How do you use Docker compose networking?

Section titled “Q586: How do you use Docker compose networking?”

Answer:

version: '3'
services:
web:
build: .
networks:
- frontend
api:
build: .
networks:
- frontend
- backend
networks:
frontend:
backend:
driver: overlay

Q587: How do you use Docker healthchecks in compose?

Section titled “Q587: How do you use Docker healthchecks in compose?”

Answer:

services:
web:
image: nginx
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s

Answer:

# nginx.container
[Container]
Image=nginx:latest
PublishPort=8080:80
[Service]
Restart=always
[Install]
WantedBy=multi-user.target

Q589: How do you use Podman generate systemd?

Section titled “Q589: How do you use Podman generate systemd?”

Answer:

Terminal window
# Generate unit files
podman generate systemd --name mycontainer > container.service
# With dependencies
podman generate systemd --name mycontainer --files --new
# Create drop-in
mkdir -p /etc/systemd/system/container.service.d

Answer:

Terminal window
# Create network
podman network create mynet
# Run with network
podman run -d --network mynet nginx
# DNS
podman run -d --network mynet --dns 8.8.8.8 nginx

Q591: How do you use Buildah in rootless mode?

Section titled “Q591: How do you use Buildah in rootless mode?”

Answer:

Terminal window
# Build
buildah --storage-driver vfs bud -t myimage .
# Without root
buildah --userns=keep-id bud -t myimage .

Answer:

Terminal window
# With TLS
skopeo inspect --tls-verify=false docker://example.com/image
# With credentials
skopeo inspect --creds user:pass docker://example.com/image
# Using config
skopeo --authfile ~/.docker/config.json inspect docker://example.com/image

Q593: How do you configure containerd with TLS?

Section titled “Q593: How do you configure containerd with TLS?”

Answer:

/etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.k8s.io/pause:3.9"
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
# /etc/containerd/certs.d/docker.io/hosts.toml
server = "https://docker.io"
[host."https://registry-1.docker.io"]
capabilities = ["pull"]

Answer:

Terminal window
# Run with containerd
ctr images pull docker.io/library/nginx:latest
ctr run -d docker.io/library/nginx:latest nginx

Answer:

/etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
tls:
ca_file: /path/to/ca.crt
cert_file: /path/to/cert.crt
key_file: /path/to/key.crt

Answer:

Terminal window
# Like docker
nerdctl pull nginx
nerdctl run -d nginx
nerdctl ps
nerdctl build -t myimage .
# With containerd
nerdctl --address /run/containerd/containerd.sock ps

Answer:

Terminal window
# Build
docker run -v $(pwd):/workspace gcr.io/kaniko-project/executor:latest \
--context /workspace \
--destination gcr.io/myproject/image:tag
# With dockerfile
docker run -v $(pwd):/workspace gcr.io/kaniko-project/executor:latest \
--context /workspace \
--dockerfile /workspace/Dockerfile \
--destination gcr.io/myproject/image:tag

Answer:

Terminal window
# Install pack
pack --version
# Build
pack build myapp --builder paketobuildpacks/builder:base
# Detect
pack detect myapp
# Rebase
pack rebase myapp --run-image gcr.io/paketo-buildpacks/run:base-cnb

Answer:

helmfile.yaml
repositories:
- name: nginx
url: https://helm.nginx.com/stable
releases:
- name: nginx-ingress
chart: nginx/nginx-ingress
values:
- ./values.yaml

Answer:

Terminal window
# Install
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# CLI
argocd login localhost:8080
# Add app
argocd app create myapp \
--repo https://github.com/myrepo/myapp \
--path . \
--dest-server https://kubernetes.default.svc \
--dest-namespace default

Answer:

# ServiceAccount
apiVersion: v1
kind: ServiceAccount
metadata:
name: myapp-sa
---
# Role
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: myapp-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
# RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: myapp-rolebinding
subjects:
- kind: ServiceAccount
name: myapp-sa
roleRef:
kind: Role
name: myapp-role
apiGroup: rbac.authorization.k8s.io

Q602: How do you use Kubernetes NetworkPolicy?

Section titled “Q602: How do you use Kubernetes NetworkPolicy?”

Answer:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: deny-all
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: allow-app
spec:
podSelector:
matchLabels:
app: web
ingress:
- from:
- podSelector:
matchLabels:
app: api
ports:
- protocol: TCP
port: 80

Answer:

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: myapp-pdb
spec:
minAvailable: 2
selector:
matchLabels:
app: myapp
---
# Or percentage
spec:
maxUnavailable: 25%
selector:
matchLabels:
app: myapp

Q604: How do you use Kubernetes ResourceQuota?

Section titled “Q604: How do you use Kubernetes ResourceQuota?”

Answer:

apiVersion: v1
kind: ResourceQuota
metadata:
name: myquota
spec:
hard:
requests.cpu: "4"
requests.memory: 8Gi
limits.cpu: "8"
limits.memory: 16Gi
pods: "10"
services: "5"

Q605: How do you use Kubernetes LimitRange?

Section titled “Q605: How do you use Kubernetes LimitRange?”

Answer:

apiVersion: v1
kind: LimitRange
metadata:
name: mylimits
spec:
limits:
- max:
cpu: "4"
memory: 8Gi
min:
cpu: 100m
memory: 128Mi
default:
cpu: 500m
memory: 1Gi
defaultRequest:
cpu: 200m
memory: 512Mi
type: Container

Q606: How do you use Kubernetes HorizontalPodAutoscaler?

Section titled “Q606: How do you use Kubernetes HorizontalPodAutoscaler?”

Answer:

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: myapp-hpa
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: myapp
minReplicas: 2
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 10
periodSeconds: 60

Q607: How do you use Kubernetes VerticalPodAutoscaler?

Section titled “Q607: How do you use Kubernetes VerticalPodAutoscaler?”

Answer:

apiVersion: "autoscaling.k8s.io/v1"
kind: VerticalPodAutoscaler
metadata:
name: myapp-vpa
spec:
targetRef:
apiVersion: "apps/v1"
kind: Deployment
name: myapp
updatePolicy:
updateMode: "Auto"
resourcePolicy:
containerPolicies:
- containerName: myapp
minAllowed:
cpu: 100m
memory: 128Mi
maxAllowed:
cpu: "4"
memory: 8Gi

Q608: How do you use Kubernetes PodDisruptionBudget?

Section titled “Q608: How do you use Kubernetes PodDisruptionBudget?”

Answer:

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: myapp
spec:
minAvailable: 2
selector:
matchLabels:
app: myapp

Q609: How do you use Kubernetes PriorityClass?

Section titled “Q609: How do you use Kubernetes PriorityClass?”

Answer:

apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
name: high-priority
value: 1000000
globalDefault: false
description: "High priority for critical workloads"
---
# Use in pod
priorityClassName: high-priority

Q610: How do you use Kubernetes ServiceMesh?

Section titled “Q610: How do you use Kubernetes ServiceMesh?”

Answer:

# Istio VirtualService
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
name: myapp
spec:
hosts:
- myapp
http:
- route:
- destination:
host: myapp
subset: v1
weight: 90
- destination:
host: myapp
subset: v2
weight: 10

Q611: How do you use Kubernetes ingress controller?

Section titled “Q611: How do you use Kubernetes ingress controller?”

Answer:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: myapp
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- host: myapp.example.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: myapp-svc
port:
number: 80

Q612: How do you use Kubernetes cert-manager?

Section titled “Q612: How do you use Kubernetes cert-manager?”

Answer:

apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: admin@example.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: myapp-tls
spec:
secretName: myapp-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
dnsNames:
- myapp.example.com

Q613: How do you use Kubernetes secrets encryption?

Section titled “Q613: How do you use Kubernetes secrets encryption?”

Answer:

encryption-config.yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
- secrets
providers:
- aesgcm:
keys:
- name: key1
secret: <base64-aes-key>
- identity: {}
---
# api-server flag
--encryption-provider-config=encryption-config.yaml
# At rest
# Secrets will be encrypted with AES-GCM

Q614: How do you use Kubernetes pod security policy?

Section titled “Q614: How do you use Kubernetes pod security policy?”

Answer:

apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: restricted
spec:
privileged: false
seLinux:
rule: RunAsAny
runAsUser:
rule: MustRunAsNonRoot
fsGroup:
rule: RunAsAny
volumes:
- 'secret'
- 'configMap'

Q615: How do you use Kubernetes pod security admission?

Section titled “Q615: How do you use Kubernetes pod security admission?”

Answer:

# Pod
securityContext:
runAsNonRoot: true
runAsUser: 1000
fsGroup: 2000
seccompProfile:
type: RuntimeDefault
# Container
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL

Q616: How do you use Kubernetes topology keys?

Section titled “Q616: How do you use Kubernetes topology keys?”

Answer:

# Service
spec:
topologyKeys:
- "kubernetes.io/hostname"
- "topology.kubernetes.io/zone"
- "topology.kubernetes.io/region"
# Pod
# PreferredDuringSchedulingIgnoredDuringExecution
# RequiredDuringSchedulingIgnoredDuringExecution

Q617: How do you use Kubernetes service mesh linkerd?

Section titled “Q617: How do you use Kubernetes service mesh linkerd?”

Answer:

Terminal window
# Install CLI
curl -sL https://run.linkerd.io/install | sh
# Install
linkerd install | kubectl apply -f -
# Add to namespace
linkerd inject deployment.yml | kubectl apply -f -
# Dashboard
linkerd viz dashboard

Answer:

Terminal window
# Install Cilium CLI
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
# Install
cilium install
# Status
cilium status

Answer:

Terminal window
# Install
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
# BGP peering
calicoctl get bgppeer
# Apply BGPPeer
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
name: my-peer
spec:
peerIP: 192.168.1.1
asNumber: 64512

Answer:

Terminal window
# Install
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
# Configure
# kube-flannel.yml
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}

Answer:

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
name: prometheus
spec:
serviceAccountName: prometheus
serviceMonitorSelector:
matchLabels:
team: frontend
resources:
requests:
memory: 400Mi
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: myapp
labels:
team: frontend
spec:
selector:
matchLabels:
app: myapp
endpoints:
- port: web

Q622: How do you use Prometheus Alertmanager?

Section titled “Q622: How do you use Prometheus Alertmanager?”

Answer:

apiVersion: monitoring.coreos.com/v1beta1
kind: AlertmanagerConfig
metadata:
name: email-config
spec:
route:
groupBy: [alertname]
receiver: email
receivers:
- name: email
emailConfigs:
- to: admin@example.com
sendResolved: true

Answer:

# Sidecar
- args:
- sidecar
- --tsdb.path=/prometheus
- --prometheus.url=http://localhost:9090
- --grpc-address=:10901
- --http-address=:10902
image: thanosio/thanos:v0.30.0
name: thanos-sidecar

Answer:

# Helm values
cortex:
config:
schema:
configs:
- from: 2022-01-01
store: tsdb
object_store: s3
schema: v11
storage:
s3:
bucket: cortex-data

Answer:

# Helm
loki:
auth_enabled: false
schema_config:
configs:
- from: 2022-01-01
store: boltdb-shipper
object_store: s3
schema: v11
index:
prefix: loki_index_
period: 24h

Answer:

# Helm
tempo:
repository: grafana/tempo
tag: latest
service:
type: LoadBalancer
storage:
trace:
backend: s3
s3:
bucket: tempo-traces

Answer:

Terminal window
# Install
kubectl create namespace observability
kubectl apply -f https://github.com/jaegertracing/jaeger-operator/releases/latest/download/jaeger-operator.yaml -n observability
# Create Jaeger
apiVersion: jaegertracing.io/v1
kind: Jaeger
metadata:
name: jaeger
spec:
strategy: allInOne
allInOne:
image: jaegertracing/all-in-one:latest
options:
query:
basePath: /jaeger/ui

Answer:

Terminal window
# Install
docker run -d --name apm-server \
--user=apm-server \
-p 8200:8200 \
-e ELASTICSEARCH_HOSTS=elasticsearch:9200 \
-e OUTPUT_LOGSTASH_INTERNAL=true \
-e KIBANA_HOST=kibana:5601 \
docker.elastic.co/apm/apm-server:latest

Answer:

# Collector
apiVersion: v1
kind: ConfigMap
metadata:
name: otel-collector-config
data:
otel-collector-config: |
receivers:
otlp:
protocols:
grpc:
http:
exporters:
otlp:
endpoint: "tempo:4317"
logging:
loglevel: debug
service:
pipelines:
traces:
receivers: [otlp]
exporters: [otlp, logging]

Answer:

Terminal window
# Install
px deploy
# Use
px run script px/conn_stats
# View streams
px streams
# Get pod info
px pod 10.0.0.1

Q631: How do you use Terraform with Kubernetes?

Section titled “Q631: How do you use Terraform with Kubernetes?”

Answer:

provider "kubernetes" {
config_path = "~/.kube/config"
}
resource "kubernetes_deployment" "example" {
metadata {
name = "example"
labels = {
app = "example"
}
}
spec {
replicas = 3
selector {
match_labels = {
app = "example"
}
}
template {
metadata {
labels = {
app = "example"
}
}
spec {
container {
image = "nginx:1.21"
name = "nginx"
resources {
limits = {
cpu = "500m"
memory = "128Mi"
}
requests = {
cpu = "250m"
memory = "64Mi"
}
}
}
}
}
}
}

Answer:

helmfile.yaml
environments:
production:
values:
- production.yaml
staging:
values:
- staging.yaml
repositories:
- name: nginx
url: https://helm.nginx.com/stable
- name: ingress-nginx
url: https://kubernetes.github.io/ingress-nginx
releases:
- name: nginx-ingress
chart: ingress-nginx/ingress-nginx
version: "4.0.0"
values:
- ingress-nginx.yaml
- name: myapp
chart: ./myapp-chart
values:
- {{ .Environment.Name }}.yaml

Answer:

Terminal window
# Install
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
# Get password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
# Login
argocd login localhost:8080
# Add repo
argocd repo add https://github.com/myorg/myapp --username user --password pass
# Create app
argocd app create myapp \
--repo https://github.com/myorg/myapp \
--path . \
--dest-server https://kubernetes.default.svc \
--dest-namespace default

Answer:

Terminal window
# Install
flux install
# Create source
flux create source git myapp \
--url=https://github.com/myorg/myapp \
--branch=main
# Create kustomization
flux create kustomization myapp \
--source=myapp \
--path=./kustomize \
--prune=true \
--interval=10m
# Sync
flux reconcile source git myapp
flux reconcile kustomization myapp

Answer:

Terminal window
# Install
helm repo add crossplane-stable https://charts.crossplane.io/stable
helm install crossplane crossplane-stable/crossplane --namespace crossplane-system --create-namespace
# Install AWS provider
kubectl apply -f https://github.com/crossplane/provider-aws/releases/latest/download/provider-aws.yaml
# Create ProviderConfig
apiVersion: aws.upbound.io/v1beta1
kind: ProviderConfig
metadata:
name: default
spec:
credentials:
source: Secret
secretRef:
namespace: crossplane-system
name: aws-creds
key: credentials

Q636: How do you use External Secrets Operator?

Section titled “Q636: How do you use External Secrets Operator?”

Answer:

apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: aws-secrets-manager
spec:
provider:
aws:
service: SecretsManager
region: us-east-1
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: myapp-secrets
spec:
refreshInterval: 1h
secretStoreRef:
name: aws-secrets-manager
kind: ClusterSecretStore
target:
name: myapp-secrets
data:
- secretKey: DB_PASSWORD
remoteRef:
key: myapp/db-password

Answer:

# Install
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/release/kyverno.yaml
# Policy
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-labels
spec:
validationFailureAction: enforce
rules:
- name: check-label
match:
resources:
kinds:
- Pod
validate:
message: "Label 'app' is required"
pattern:
metadata:
labels:
app: "?*"

Answer:

# Install
kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper-library/master/library/pod-security-policy/privileged-containers/template.yaml
# Constraint
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sPrivilegedContainer
metadata:
name: psp-privileged-container
spec:
match:
kinds:
- apiGroups: [""]
kinds: ["Pod"]

Answer:

Terminal window
# Install CLI
curl -s https://get.datree.io | /bin/bash
# Run in CI/CD
datree test deployment.yaml
# As kubectl plugin
kubectl datree test deployment.yaml

Answer:

Terminal window
# Run
kube-bench run --targets node
# As job
kubectl apply -f https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job.yaml
# Specific targets
kube-bench run --targets=master,etcd,node

Answer:

Terminal window
# Install
curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh | /bin/bash
# Scan
kubescape scan
# Submit to cloud
kubescape scan --submit --account EMAIL
# Host scanner
kubescape host-scan

Answer:

Terminal window
# Scan image
trivy image nginx:latest
# Scan filesystem
trivy fs .
# Scan running containers
trivy kubernetes --context kind-kind
# As admission controller
trivy server

Answer:

Terminal window
# Install
kubectl apply -f https://raw.githubusercontent.com/falcosecurity/falco/master/integrations/k8s-using-daemonset/falco-daemonset-configmap.yaml
# Rules
# /etc/falco/falco-rules.yaml
- rule: Shell in container
desc: notice shell activity
condition: container.id != host and proc.name = bash
output: "Shell in container (user=%user.name container_id=%container.id image=%container.image.repository)"
priority: WARNING

Answer:

Terminal window
# Install agent
kubectl create namespace sysdig-agent
curl -s https://download.sysdig.com/stable/agent-k8s.yaml | sed 's/CLUSTER_NAME/cluster-name/g' | kubectl apply -f -
# Use
sysdig -pc -c "topcontainers_cpu"

Answer:

Terminal window
# Install
kubectl apply -f https://download.aquasec.com/aquasec/aquasec.yaml
# Scan
aqua cvelib --version
# Enforce
# Add labels to namespace
kubectl label namespace production aqua.security=enforce

Answer:

Terminal window
# Install
npm install -g snyk
# Test container image
snyk container test nginx:latest
# Test Kubernetes
snyk k8s test --file=k8s-deployment.yaml
# Monitor
snyk monitor --docker --file=Dockerfile

Answer:

Terminal window
# Install
docker run -d --name anchore \
-p 8228:8228 -p 8338:8338 \
anchoreio/anchore-engine:latest
# Add image
curl -X POST http://localhost:8228/v1/images \
-d '{"source":"docker.io/library/nginx:latest"}'
# Analyze
curl -X GET http://localhost:8228/v1/images/$(uuid)/vuln

Answer:

Terminal window
# Initialize
notary -s https://notary.docker.io init docker.io/library/nginx
# Sign
notary -s https://notary.docker.io sign docker.io/library/nginx
# Verify
DOCKER_CONTENT_TRUST=1 docker pull nginx

Answer:

Terminal window
# Generate keys
cosign generate-key-pair
# Sign
cosign sign --key cosign.key image:tag
# Verify
cosign verify --key cosign.pub image:tag
# Signatures are stored in the registry alongside the image; inspect them
cosign tree image:tag
# Verify with keyless
cosign verify image:tag

Answer:

Terminal window
# Keyless signing (OIDC flow; no --key needed)
cosign sign image:tag
# Verify keyless
cosign verify image:tag
# Upload SBOM
cosign attach sbom --sbom spdx image:tag
# Verify attestation
cosign verify-attestation --key cosign.pub image:tag

Answer:

Terminal window
# Install
flux bootstrap git \
--url=https://github.com/org/repo \
--branch=main \
--path=clusters/mycluster
# Add Helm repo
flux create source helm ingress-nginx \
--url=https://kubernetes.github.io/ingress-nginx \
--interval=1h
# Create release
flux create helmrelease ingress-nginx \
--source=HelmRepository/ingress-nginx \
--chart=ingress-nginx \
--namespace=ingress-nginx

Answer:

apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: myworkflow-
spec:
entrypoint: main
templates:
- name: main
dag:
tasks:
- name: build
template: build
- name: test
template: test
dependencies: [build]
- name: build
container:
image: golang:latest
command: [make, build]
- name: test
container:
image: golang:latest
command: [make, test]

Answer:

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
name: webhook
spec:
webhook:
example:
port: "12000"
endpoint: /example
method: POST
---
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
name: webhook-sensor
spec:
dependencies:
- name: webhook-dep
eventSourceName: webhook
eventName: example
triggers:
- template:
name: log-trigger
log:
message: "{{ .webhook-dep.body }}"

Answer:

apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
name: myapp
spec:
replicas: 10
strategy:
canary:
canaryService: myapp-canary
stableService: myapp-stable
trafficRouting:
nginx:
stableIngress: myapp-ingress
steps:
- setWeight: 10
- pause: {duration: 10m}
- setWeight: 20
- pause: {duration: 10m}
- setWeight: 100
selector:
matchLabels:
app: myapp
template:
metadata:
labels:
app: myapp
spec:
containers:
- name: myapp
image: myapp:latest

Answer:

apiVersion: tekton.dev/v1beta1
kind: TaskRun
metadata:
name: mytaskrun
spec:
taskRef:
name: mytask
params:
- name: image
value: gcr.io/myproject/myimage

Answer:

Terminal window
# Install
jx install --provider=kubernetes
# Create quickstart
jx create quickstart
# Promote
jx promote myapp --version 1.0.0 --environment production

Answer:

# Pipeline
application: myapp
pipeline:
stages:
- name: Deploy
type: deployManifest
manifests:
- manifest:
apiVersion: apps/v1
kind: Deployment
spec:
replicas: 3

Q658: How do you use ArgoCD Image Updater?

Section titled “Q658: How do you use ArgoCD Image Updater?”

Answer:

Terminal window
# Install
kubectl apply -f https://raw.githubusercontent.com/argoproj-labs/argocd-image-updater/stable/deploy/overlay.yaml
# Configure via annotations on the Argo CD Application (there is no ArgoCDImageUpdater CRD)
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: myapp
namespace: argocd
annotations:
argocd-image-updater.argoproj.io/image-list: nginx
argocd-image-updater.argoproj.io/nginx.update-strategy: latest

Answer:

Terminal window
# Install
docker run -it -p 7000:7000 backstage
# Create plugin
backstage-cli create-plugin --name my-plugin
# Add to app
# app-config.yaml
catalog:
locations:
- type: url
target: https://github.com/org/repo/catalog-info.yaml

Q660: How do you use Service Mesh Interface?

Section titled “Q660: How do you use Service Mesh Interface?”

Answer:

Terminal window
# Install SMI
kubectl apply -f https://github.com/smi/smi-adaptor-istio/releases/download/v0.6.0/install.yaml
# Use traffic split
kubectl apply -f - <<EOF
apiVersion: split.smi-spec.io/v1alpha2
kind: TrafficSplit
metadata:
name: myapp-split
spec:
service: myapp
backends:
- service: myapp-v1
weight: 90
- service: myapp-v2
weight: 10
EOF

Answer:

Terminal window
# Install CLI
wget -qO - https://raw.githubusercontent.com/dapr/cli/master/install.sh | /bin/bash
# Initialize
dapr init --kubernetes
# Deploy app
kubectl apply -f deploy.yaml
# Invoke
dapr invoke --app-id myapp --method mymethod --payload '{}'

Answer:

apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
name: myapp-scaler
spec:
scaleTargetRef:
name: myapp
pollingInterval: 15
cooldownPeriod: 300
minReplicaCount: 0
maxReplicaCount: 10
triggers:
- type: prometheus
metadata:
serverAddress: http://prometheus:9090
metricName: http_requests_per_second
threshold: "100"

Answer:

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
name: myapp
spec:
template:
spec:
containers:
- image: gcr.io/knative-samples/helloworld-go
ports:
- containerPort: 8080
resources:
limits:
cpu: "1000m"
memory: "256Mi"
requests:
cpu: "250m"
memory: "128Mi"

Answer:

Terminal window
# Install
kubectl apply -f https://github.com/knative/net-kourier/releases/download/v0.26.0/kourier.yaml
# Configure
kubectl patch configmap/config-network \
--namespace knative-serving \
--type merge \
--patch '{"data":{"kourier-enabled":"true"}}'

Answer:

apiVersion: serving.kserve.io/v1beta1
kind: InferenceService
metadata:
name: sklearn-iris
spec:
predictor:
model:
modelFormat:
name: sklearn
storageUri: "s3://bucket/sklearn/mymodel"

Answer:

apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: seldon-model
spec:
predictors:
- graph:
name: classifier
implementation: SKLEARN_SERVER
modelUri: "s3://bucket/model"
name: default
replicas: 1

Answer:

import bentoml
import pandas as pd
@bentoml.service(
resources={"cpu": "2"},
traffic={"timeout": 60}
)
class MyService:
@bentoml.api
def predict(self, df: pd.DataFrame) -> list:
# Your prediction logic
return predictions

Q668: How do you use Triton Inference Server?

Section titled “Q668: How do you use Triton Inference Server?”

Answer:

Terminal window
# Run
docker run --gpus=1 -p 8000:8000 -v /model/repository:/models nvcr.io/nvidia/tritonserver:latest \
--model-repository=/models
# Config
# config.pbtxt
name: "mymodel"
platform: "tensorrt_plan"
max_batch_size: 32
input [
{
name: "input"
data_type: TYPE_FP32
dims: [ -1, 3, 224, 224 ]
}
]
output [
{
name: "output"
data_type: TYPE_FP32
dims: [ 1000 ]
}
]

Answer:

Terminal window
# Run server
mlflow server --backend-store-uri sqlite:///mlflow.db --default-artifact-root ./artifacts
# Track
import mlflow
mlflow.set_experiment("myexperiment")
with mlflow.start_run():
mlflow.log_param("alpha", 0.5)
mlflow.log_metric("rmse", 0.1)
mlflow.log_artifact("model.pkl")

Answer:

# Install
kubectl apply -f https://raw.githubusercontent.com/kubeflow/manifests/v1.6-branch/kfdef/kfctl_k8s_istio.v1.6.0.yaml
# Pipeline
apiVersion: kubeflow.org/v1beta1
kind: PipelineRun
metadata:
name: my-pipeline-run
spec:
pipelineRef:
name: my-pipeline
params:
- name: param1
value: value1

Answer:

Terminal window
# Install
kubectl apply -f https://github.com/weaveworks/weave-gitops/releases/download/v0.9.0/website-and-core.yaml
# Get password
kubectl get secret -n weave-system weave-gitops-credentials -o jsonpath='{.data.admin}' | base64 -d

Answer:

Terminal window
# Install
port check --port 3003
# Add integration
port integrate github --owner myorg --repo myrepo
# Create entity
port create entity --blueprint myblueprint --identifier myentity

Answer:

apiVersion: scaffolder.backstage.io/v1beta3
kind: Template
metadata:
name: my-template
spec:
owner: myteam
type: service
parameters:
- title: Name
properties:
name:
type: string
steps:
- id: fetch-template
action: fetch:template
name: Fetch template
input:
url: ./template
values:
name: ${{ parameters.name }}

Q674: How do you use Crossplane composition?

Section titled “Q674: How do you use Crossplane composition?”

Answer:

apiVersion: apiextensions.crossplane.io/v1
kind: Composition
metadata:
name: composition.example.com
spec:
compositeTypeRef:
apiVersion: example.com/v1alpha1
kind: Database
patchSets:
- name: metadata
patches:
- fromFieldPath: metadata.labels
toFieldPath: metadata.labels
resources:
- name: rds
base:
apiVersion: rds.aws.upbound.io/v1beta1
kind: DBInstance
spec:
forProvider:
engine: postgres
instanceClass: db.t3.micro

Q675: How do you use Pulumi Kubernetes operator?

Section titled “Q675: How do you use Pulumi Kubernetes operator?”

Answer:

apiVersion: pulumi.com/v1
kind: Stack
metadata:
name: my-stack
spec:
stack: production
repository:
url: https://github.com/myorg/infra
commit: main
destroyOnFinalDeletion: false

Answer:

import { Construct } from 'constructs';
import { App, Chart, YamlOutputType } from 'cdk8s';
export class MyChart extends Chart {
constructor(scope: Construct, id: string) {
super(scope, id);
new KubeDeployment(this, 'deployment', {
spec: {
replicas: 3,
selector: {
matchLabels: { app: 'myapp' }
},
template: {
metadata: { labels: { app: 'myapp' } },
spec: {
containers: [{
name: 'myapp',
image: 'nginx:latest',
ports: [{ containerPort: 80 }]
}]
}
}
}
});
}
}
const app = new App({ outputType: YamlOutputType.FILE });
new MyChart(app, 'mychart');
app.synth();

Answer:

import { Construct } from 'constructs';
import { App, TerraformStack, TerraformOutput } from 'cdktf';
import * as Aws from '@cdktf/provider-aws';
class MyStack extends TerraformStack {
constructor(scope: Construct, id: string) {
super(scope, id);
new Aws.provider.AwsProvider(this, 'aws', {
region: 'us-east-1'
});
const instance = new Aws.instance.Instance(this, 'web', {
ami: 'ami-0c55b159cbfafe1f0',
instanceType: 't3.micro'
});
new TerraformOutput(this, 'publicIp', {
value: instance.publicIp
});
}
}
new App().synth();

Answer:

project = "myproject"
app "web" {
build {
use "docker" {}
}
deploy {
use "kubernetes" {
namespace = "default"
}
}
release {
use "docker" {}
}
}

Answer:

Terminal window
# Install
nh install --kubeconfig ~/.kube/config
# Add service
nh add myapp --install 8080
# Dev mode
nh dev myapp --image myapp:dev
# Forward
nh port-forward myapp 8080:8080

Answer:

kind: Project
name: myproject
environments:
- name: local
defaultNamespace: default
- name: dev
providers:
- name: kubernetes
namespace: dev
buildMode: kaniko
providers:
- name: kubernetes
modules:
- name: api
type: container
build:
command: docker build -t myproject/api .
deploy:
kubectl:
manifests:
- manifest/deploy.yaml
files:
- manifest/configmap.yaml

Q681: How do you configure system logging?

Section titled “Q681: How do you configure system logging?”

Answer:

/etc/rsyslog.conf
# Module
$ModLoad imuxsock
$ModLoad imjournal
# Template
$template RemoteLogs,"/var/log/%HOSTNAME%/%PROGRAMNAME%.log"
# Forward all
*.* @@syslog.example.com:514

Answer:

/etc/audit/auditd.conf
log_file = /var/log/audit/audit.log
max_log_file = 100
max_log_file_action = ROTATE
# /etc/audit/rules.d/audit.rules
-w /etc/passwd -p wa -k passwd_changes
-w /etc/shadow -p wa -k shadow_changes
-w /etc/sudoers -p wa -k sudoers_changes

Q683: How do you configure secure logging?

Section titled “Q683: How do you configure secure logging?”

Answer:

/etc/syslog-ng/syslog-ng.conf
# Use syslog-ng
source s_system { system(); };
destination d_secure { file("/var/log/secure"); };
filter f_secure { facility(auth, authpriv); };
log { source(s_system); filter(f_secure); destination(d_secure); };

Answer:

Terminal window
# GoAccess
goaccess /var/log/nginx/access.log -o /var/www/html/report.html --real-time-html
# Splunk forwarder
# Install and configure
/opt/splunkforwarder/bin/splunk start
# ELK
filebeat modules enable nginx
filebeat modules enable system

Q685: How do you configure audit for compliance?

Section titled “Q685: How do you configure audit for compliance?”

Answer:

Terminal window
# CIS benchmark rules
auditctl -w /etc/passwd -p wa -k cis_passwd
auditctl -w /etc/shadow -p wa -k cis_shadow
auditctl -w /etc/group -p wa -k cis_group
auditctl -w /etc/gshadow -p wa -k cis_gshadow
auditctl -w /etc/sudoers -p wa -k cis_sudoers
# PCI-DSS
auditctl -w /var/log/audit/ -p wa -k pci_audit
auditctl -w /etc/pam.d/ -p wa -k pci_pam

Q686: How do you implement defense in depth?

Section titled “Q686: How do you implement defense in depth?”

Answer:

Terminal window
# Layer 1: Network
# Firewall, segmentation, IDS/IPS
# Layer 2: Host
# SELinux/AppArmor, hardening, patching
# Layer 3: Application
# Input validation, secure coding
# Layer 4: Data
# Encryption, access control
# Layer 5: Monitoring
# SIEM, logging, alerting

Q687: How do you configure intrusion detection?

Section titled “Q687: How do you configure intrusion detection?”

Answer:

/etc/snort/snort.conf
# Network IDS (Snort)
var HOME_NET 192.168.1.0/24
alert tcp any any -> $HOME_NET any (msg:"Suspicious traffic"; sid:1000001;)
# Host IDS (AIDE)
aide --update
# File integrity (Tripwire)
twadmin --create-polfile /etc/tripwire/tw.pol
tripwire --init

Answer:

Terminal window
# Rootless
usermod -r -s /usr/sbin/nologin dockremap
# Seccomp (default profile is applied automatically; pass a path for a custom one)
docker run --security-opt seccomp=/path/to/profile.json nginx
# AppArmor
docker run --security-opt apparmor=profile nginx
# Capabilities
docker run --cap-drop ALL --cap-add NET_BIND_SERVICE nginx

Answer:

Terminal window
# RBAC
kubectl create rolebinding admin --clusterrole=admin --group=developers
# Network policies
kubectl apply -f network-policy.yaml
# Secrets encryption
# Enable encryption at rest
# Pod security
# Use PodSecurityPolicy or Admission Controller

Answer:

/etc/sysctl.conf
# Disable unused filesystems
install cramfs /bin/true
install squashfs /bin/true
install vfat /bin/true
# Network hardening
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.icmp_echo_ignore_broadcasts = 1

Answer:

Terminal window
# Identity verification
# mTLS for service communication
# JWT tokens with short expiry
# Network segmentation
# Microsegmentation with Calico/Cilium
# Device trust
# Certificate-based authentication
# Continuous verification
# Real-time risk assessment

Q692: How do you implement secrets management?

Section titled “Q692: How do you implement secrets management?”

Answer:

Terminal window
# HashiCorp Vault
# Install
vault server -dev
# Store secret
vault kv put secret/myapp db_password=secret123
# Use in Kubernetes
# External Secrets Operator
# AWS Secrets Manager
aws secretsmanager create-secret --name myapp/db-password --secret-string '{"username":"admin","password":"secret"}'

Answer:

Terminal window
# Create CA
openssl genrsa -out ca.key 4096
openssl req -x509 -new -nodes -key ca.key -sha256 -days 3650 -out ca.crt
# Issue certificate
openssl genrsa -out server.key 2048
openssl req -new -key server.key -out server.csr
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt -days 365 -sha256

Q694: How do you implement TLS everywhere?

Section titled “Q694: How do you implement TLS everywhere?”

Answer:

Terminal window
# Nginx
server {
listen 443 ssl http2;
ssl_certificate /etc/ssl/certs/server.crt;
ssl_certificate_key /etc/ssl/private/server.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
}
# Database
# PostgreSQL: ssl = on
# MySQL: require ssl

Q695: How do you implement network segmentation?

Section titled “Q695: How do you implement network segmentation?”

Answer:

Terminal window
# VLANs (vconfig is deprecated; use iproute2)
ip link add link eth0 name eth0.100 type vlan id 100 # DMZ
ip link add link eth0 name eth0.200 type vlan id 200 # Internal
# Firewalls
# Perimeter
# Application
# Database
# Kubernetes namespaces
kubectl create namespace production
kubectl label namespace production env=production

Q696: How do you implement backup strategy?

Section titled “Q696: How do you implement backup strategy?”

Answer:

Terminal window
# 3-2-1 Rule
# 3 copies of data
# 2 different media types
# 1 offsite
# Backup types
# Full: daily
# Incremental: every 4 hours
# Archive: monthly
# Test restore
# Monthly

Q697: How do you implement disaster recovery?

Section titled “Q697: How do you implement disaster recovery?”

Answer:

Terminal window
# RTO (Recovery Time Objective)
# How long can we be down?
# RPO (Recovery Point Objective)
# How much data loss is acceptable?
# DR site
# Active-passive
# Active-active
# Failover
# DNS failover
# Load balancer health checks

Q698: How do you implement high availability?

Section titled “Q698: How do you implement high availability?”

Answer:

Terminal window
# Load balancer
# HAProxy, NGINX
# Clustering
# Pacemaker/Corosync
# Keepalived
# Database
# Master-slave
# Multi-master
# Application
# Stateless design
# Session affinity

Q699: How do you implement capacity planning?

Section titled “Q699: How do you implement capacity planning?”

Answer:

Terminal window
# Monitor trends
# CPU, Memory, Disk, Network
# Growth rate
# Monthly review
# Planning
# Add capacity at 70% utilization
# Auto-scaling
# Cloud native: Cluster Autoscaler
# HPA/VPA

Q700: How do you implement incident response?

Section titled “Q700: How do you implement incident response?”

Answer:

Terminal window
# Preparation
# Runbooks
# Contact list
# Detection
# Monitoring alerts
# Log analysis
# Containment
# Isolate affected systems
# Block attacker
# Eradication
# Remove malware
# Patch vulnerabilities
# Recovery
# Restore from backup
# Verify system integrity
# Lessons learned
# Post-incident review

Continue with questions 701-1000 covering more advanced topics…