Linux Disk Management
Disk partitioning, LVM, and filesystem management.
Disk Fundamentals
# BLOCK DEVICE INSPECTION
# lsblk - the go-to command for disk layout
lsblk # Basic tree view
lsblk -f # Show filesystems
lsblk -o NAME,SIZE,TYPE,MOUNTPOINT,FSTYPE,UUID # Custom columns
lsblk -J # JSON output for scripting
# (pipe -J output into jq for automation)
# WHY THESE MATTER:
# - NAME: device name (sda, nvme0n1p1)
# - TYPE: disk, part, lvm, crypt
# - FSTYPE: ext4, xfs, btrfs, swap
# - UUID: persistent identifier (use in fstab!)
# blkid - filesystem metadata
# (run as root - unprivileged blkid may show stale/partial cache data)
blkid # All devices
blkid /dev/sda1 # Specific device
blkid -o export /dev/sda1 # Shell-friendly output (KEY=VALUE lines)
# Parse blkid for automation
# Quote the command substitution so eval sees each KEY=VALUE line intact;
# unquoted, the output word-splits and values containing spaces
# (e.g. LABEL="My Disk") corrupt the assignments.
# NOTE: this imports DEVNAME/UUID/TYPE/... into the current shell.
eval "$(blkid -o export /dev/sda1)"
echo "Device $DEVNAME has UUID $UUID and type $TYPE"
# fdisk/parted - partition inspection
fdisk -l /dev/sda # MBR/GPT partitions
parted /dev/sda print # Better for GPT
gdisk -l /dev/sda # GPT-specific
# DISK USAGE ANALYSIS
df -h # Human readable
df -i # Inode usage (can fill up!)
df -T # Show filesystem types
df -h / /home /var | column -t # Specific filesystems
# TIP: Inode exhaustion happens with millions of small files
# Check: df -i | awk 'NR>1 && $5+0 > 80 {print $6 " inode usage: " $5}'
# (NR>1 skips the header; $5+0 coerces "85%" to 85 for a NUMERIC compare -
#  comparing against the string "80%" would be lexical and give wrong hits)
# du - directory sizes with awk patterns
du -sh /var/log # Summary of one dir
du -h --max-depth=1 /var | sort -hr # One level, sorted
du -ah /var/log | sort -hr | head -20 # Top 20 files/dirs
# PRODUCTION: Find largest directories
du -xh --max-depth=2 / 2>/dev/null | sort -hr | head -20
# -x: stay on same filesystem (don't cross mounts)
# ncdu - interactive disk usage (better than du for exploration)
ncdu /var # Navigate with arrows
ncdu -x / # Don't cross filesystems
Mount Operations
# MOUNT BASICS
mount # Show all mounts
mount | column -t # Formatted
mount -t ext4 # Filter by type
findmnt # Tree view
findmnt -t nfs,nfs4 # NFS mounts only
# MOUNT A DEVICE
mount /dev/sdb1 /mnt # Basic mount
mount -o ro /dev/sdb1 /mnt # Read-only
mount -o noexec,nosuid /dev/sdb1 /mnt # Security options
# MOUNT OPTIONS THAT MATTER:
# noexec - Can't execute binaries (security)
# nosuid - Ignore setuid bits (security)
# nodev - No device files (security)
# noatime - Don't update access times (performance)
# defaults - rw,suid,dev,exec,auto,nouser,async
# BIND MOUNTS - share directories
mount --bind /source /destination # Bind mount
mount --rbind /source /dest # Recursive bind (submounts too)
# TIP: Bind mounts are useful for chroots and containers
# REMOUNT WITH NEW OPTIONS
mount -o remount,ro /mnt # Make read-only
mount -o remount,rw / # Make root writable (recovery)
# UNMOUNT
umount /mnt # Basic unmount
umount -l /mnt # Lazy unmount (when busy)
# NOTE: lazy unmount detaches the name immediately, but the filesystem stays
# busy until the last open file closes - not safe to pull the disk right away
umount -f /mnt # Force (NFS only)
# FIND WHAT'S USING A MOUNT
lsof +D /mnt # Open files (+D recurses the tree - can be slow)
fuser -vm /mnt # Processes using mount
fuser -km /mnt # Kill processes using mount (SIGKILL - last resort!)
# PERSISTENT MOUNTS - /etc/fstab
# ALWAYS use UUID, not device names!
cat <<'EOF'
# <filesystem> <mountpoint> <type> <options> <dump> <pass>
UUID=abc123-... /home ext4 defaults,noatime 0 2
UUID=def456-... /var/log ext4 defaults,noexec,nosuid 0 2
10.50.1.70:/volume1/k3s /mnt/k3s nfs4 defaults,_netdev 0 0
//nas-01/backup /mnt/smb cifs credentials=/root/.smbcreds,uid=1000 0 0
EOF
# RELOAD FSTAB
# (on systemd hosts, daemon-reload after editing fstab so the generated
#  .mount units are refreshed, then mount -a)
mount -a # Mount all fstab entries
systemctl daemon-reload # If using systemd mount units
# SYSTEMD MOUNT UNITS (alternative to fstab)
# /etc/systemd/system/mnt-k3s.mount
# NOTE: the unit filename MUST match the Where= path with '/' mapped to '-'
# (/mnt/k3s -> mnt-k3s.mount); systemd refuses to start it otherwise
cat <<'EOF'
[Unit]
Description=k3s NFS Storage
After=network-online.target
Wants=network-online.target
[Mount]
What=10.50.1.70:/volume1/k3s
Where=/mnt/k3s
Type=nfs4
Options=defaults,_netdev,nofail
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now mnt-k3s.mount
LVM Operations
# LVM HIERARCHY:
# Physical Volumes (PV) → Volume Groups (VG) → Logical Volumes (LV)
# Disk → Pool → Partitions (conceptually)
# INSPECTION
pvs # Physical volumes
vgs # Volume groups
lvs # Logical volumes
pvdisplay /dev/sdb1 # PV details
vgdisplay vg_data # VG details
lvdisplay /dev/vg_data/lv_home # LV details
# COMBINED VIEW
lsblk | grep -E 'NAME|lvm' # LVMs in tree
dmsetup ls # Device mapper view
# CREATE LVM STACK
# Step 1: Create Physical Volume
pvcreate /dev/sdb1 # Single disk
pvcreate /dev/sd{c,d}1 # Multiple disks
# Step 2: Create Volume Group
vgcreate vg_data /dev/sdb1 # Create VG
vgextend vg_data /dev/sdc1 # Add disk to VG
# Step 3: Create Logical Volume
# (-L takes absolute sizes; lowercase -l takes extents or percentages)
lvcreate -L 50G -n lv_home vg_data # Fixed size
lvcreate -l 100%FREE -n lv_data vg_data # Use all space
lvcreate -l 50%VG -n lv_var vg_data # 50% of VG
# Step 4: Format and mount
mkfs.ext4 /dev/vg_data/lv_home
mount /dev/vg_data/lv_home /home
# EXTEND LOGICAL VOLUME (most common operation)
# Check available space first
vgs # Check VG free space
# Extend LV
lvextend -L +10G /dev/vg_data/lv_home # Add 10GB
lvextend -l +100%FREE /dev/vg_data/lv_home # Use all free
# Extend filesystem (AFTER lvextend)
resize2fs /dev/vg_data/lv_home # ext4 (device path is fine)
xfs_growfs /home # xfs takes the MOUNT POINT, not the device node
# ONE COMMAND (extend both)
lvextend -r -L +10G /dev/vg_data/lv_home # -r resizes fs too
# REDUCE LOGICAL VOLUME (DANGEROUS - backup first!)
# ext4 can only shrink OFFLINE (unmounted) - xfs cannot shrink at all!
umount /home # Must unmount
e2fsck -f /dev/vg_data/lv_home # Check filesystem
resize2fs /dev/vg_data/lv_home 40G # Shrink fs FIRST
lvreduce -L 40G /dev/vg_data/lv_home # Then shrink LV
mount /home # Remount (via its fstab entry)
# LVM SNAPSHOTS
# Create snapshot (great for backups)
# The -L size is the copy-on-write area: if the origin changes by more than
# 5G while the snapshot exists, the snapshot becomes invalid - watch the
# Data% column in lvs while it's active
lvcreate -s -n lv_home_snap -L 5G /dev/vg_data/lv_home
# Mount snapshot read-only
mount -o ro /dev/vg_data/lv_home_snap /mnt/snap
# Backup from snapshot
tar -cvzf /backup/home.tar.gz -C /mnt/snap .
# Remove snapshot when done
lvremove /dev/vg_data/lv_home_snap
# LVM THIN PROVISIONING (overcommit storage)
# Create thin pool
lvcreate -L 100G --thinpool tp_data vg_data
# Create thin volumes (can exceed pool size)
lvcreate -V 50G --thin -n lv_vm1 vg_data/tp_data
lvcreate -V 50G --thin -n lv_vm2 vg_data/tp_data
lvcreate -V 50G --thin -n lv_vm3 vg_data/tp_data # 150G allocated, 100G pool!
# Monitor thin pool usage
# (a pool that hits 100% data_percent causes I/O errors in the thin LVs)
lvs -o +lv_size,data_percent,metadata_percent vg_data/tp_data
Disk Health Monitoring
# SMART MONITORING (Self-Monitoring, Analysis and Reporting Technology)
# Install: pacman -S smartmontools / apt install smartmontools
# Check SMART support
smartctl -i /dev/sda # Info
smartctl -H /dev/sda # Health status (PASSED/FAILED)
# NOTE: "PASSED" only means no vendor threshold has tripped - a drive can
# still be failing; check the raw attribute values below too
# View SMART attributes
smartctl -A /dev/sda # All attributes
smartctl -A /dev/sda | grep -E 'Reallocated|Pending|Current_Pending|Offline_Uncorrectable'
# CRITICAL ATTRIBUTES TO WATCH:
# 5 Reallocated_Sector_Ct - Bad sectors remapped (>0 = drive failing)
# 187 Reported_Uncorrect - Uncorrectable errors
# 197 Current_Pending_Sector - Sectors waiting to be remapped
# 198 Offline_Uncorrectable - Can't be remapped
# Run SMART tests
# (command returns immediately; the test runs inside the drive -
#  check progress/results with 'smartctl -l selftest')
smartctl -t short /dev/sda # Quick test (~2 min)
smartctl -t long /dev/sda # Thorough test (~hours)
smartctl -l selftest /dev/sda # View test results
# NVMe drives use different commands
nvme smart-log /dev/nvme0 # NVMe SMART data
nvme list # List NVMe devices
# BADBLOCKS - surface scan (DESTRUCTIVE write test!)
# Read-only test (safe)
badblocks -sv /dev/sdb # Non-destructive read
# Write test (DESTROYS DATA)
badblocks -wsv /dev/sdb # Write test (wipes disk!)
# DISK I/O MONITORING
iostat -x 1 # Extended stats, 1 sec interval
iotop # Top for I/O
iotop -o # Only show active I/O
# Key iostat columns:
# %util - Disk utilization (100% = saturated)
# await - Average I/O wait time (ms)
# r/s, w/s - Reads/writes per second
# INFRASTRUCTURE: Monitor all disks
# NOTE: /dev/sd? only matches sda..sdz; NVMe disks (/dev/nvme?n1) are skipped
for disk in /dev/sd?; do
echo "=== $disk ==="
smartctl -H "$disk" 2>/dev/null | grep -E 'result:|SMART overall'
smartctl -A "$disk" 2>/dev/null | awk '/Reallocated|Pending/ {print " "$2": "$NF}'
done
# SCHEDULE SMART TESTS (systemd timer or cron)
# /etc/smartd.conf
# /dev/sda -a -o on -S on -s (S/../.././02|L/../../6/03)
# Short test daily at 2am, Long test Saturdays at 3am
systemctl enable --now smartd # Enable smartd daemon
NFS Operations
# NFS CLIENT OPERATIONS
# Install: pacman -S nfs-utils / apt install nfs-common
# Show NFS exports from server
showmount -e nas-01.inside.domusdigitalis.dev
showmount -e 10.50.1.70
# Mount NFS share
mount -t nfs4 10.50.1.70:/volume1/k3s /mnt/k3s
mount -t nfs 10.50.1.70:/volume1/backup /mnt/backup -o vers=3
# NFS mount options
mount -t nfs4 10.50.1.70:/volume1/k3s /mnt/k3s \
-o rw,hard,timeo=600,retrans=2,_netdev
# OPTIONS EXPLAINED:
# hard - Retry indefinitely (vs soft: give up)
# timeo=600 - Timeout 60 seconds (timeo is in TENTHS of a second)
# retrans=2 - Retry 2 times before timeout
# _netdev - Wait for network before mounting
# nofail - Boot continues if mount fails
# PERSISTENT NFS MOUNTS (fstab)
# /etc/fstab entry
echo '10.50.1.70:/volume1/k3s /mnt/k3s nfs4 defaults,_netdev,nofail 0 0' >> /etc/fstab
# AUTOFS - mount on demand (better for workstations)
# Install: pacman -S autofs
# /etc/auto.master
echo '/mnt/nfs /etc/auto.nfs --timeout=300' >> /etc/auto.master
# /etc/auto.nfs
echo 'k3s -fstype=nfs4,rw 10.50.1.70:/volume1/k3s' >> /etc/auto.nfs
systemctl enable --now autofs
# Now /mnt/nfs/k3s auto-mounts when accessed
# NFS SERVER SETUP (Rocky/RHEL)
dnf install nfs-utils
systemctl enable --now nfs-server
# /etc/exports
# WARNING: no_root_squash lets remote root act as root on the export -
# only use it for trusted hosts/subnets (k3s provisioners need it)
cat <<'EOF'
/export/k3s 10.50.1.0/24(rw,sync,no_subtree_check,no_root_squash)
/export/backup 10.50.1.0/24(ro,sync,no_subtree_check)
EOF
exportfs -ra # Reload exports
exportfs -v # Verify exports
# NFS TROUBLESHOOTING
# Check if NFS is working
rpcinfo -p 10.50.1.70 # RPC services
nfsstat -c # Client stats
nfsstat -s # Server stats
# Debug mount issues
mount -v -t nfs4 10.50.1.70:/volume1/k3s /mnt/k3s
dmesg | tail -20 # Kernel messages
# Check firewall (server side)
firewall-cmd --list-services | grep -E 'nfs|mountd|rpc-bind'
# INFRASTRUCTURE: k3s NFS provisioner verification
# From k3s node:
ssh k3s-master-01 "df -h | grep nfs"
ssh k3s-master-01 "mount | grep nfs"
# Synology NAS-specific
# DSM Control Panel → File Services → NFS → Enable
# Shared Folder → Edit → NFS Permissions → Create rule
Filesystem Operations
# CREATE FILESYSTEMS
mkfs.ext4 /dev/sdb1 # ext4 (most common)
mkfs.xfs /dev/sdb1 # XFS (RHEL default)
mkfs.btrfs /dev/sdb1 # Btrfs (snapshots, compression)
# EXT4 OPTIONS
mkfs.ext4 -L "data" /dev/sdb1 # With label
mkfs.ext4 -m 1 /dev/sdb1 # Reserve 1% (vs 5% default)
mkfs.ext4 -T largefile /dev/sdb1 # Optimize for large files
# XFS OPTIONS
mkfs.xfs -L "data" /dev/sdb1 # With label
mkfs.xfs -f /dev/sdb1 # Force (overwrite existing)
# FILESYSTEM LABELS
e2label /dev/sdb1 # Show ext4 label
e2label /dev/sdb1 "mydata" # Set ext4 label
xfs_admin -L "mydata" /dev/sdb1 # Set XFS label
blkid -s LABEL /dev/sdb1 # Show any filesystem label
# Mount by label
mount LABEL=mydata /mnt
# FILESYSTEM CHECK
# NEVER run on mounted filesystem!
umount /dev/sdb1
fsck /dev/sdb1 # Auto-detect type
e2fsck -f /dev/sdb1 # Force ext4 check
xfs_repair /dev/sdb1 # XFS repair
# Check without fixing
e2fsck -n /dev/sdb1 # Read-only check
# TUNE FILESYSTEM
tune2fs -l /dev/sdb1 # Show ext4 parameters
tune2fs -m 1 /dev/sdb1 # Set reserved to 1%
tune2fs -c 30 /dev/sdb1 # Check every 30 mounts
tune2fs -i 30d /dev/sdb1 # Check every 30 days
# RESIZE FILESYSTEM
# ext4 - can grow online, shrink requires unmount
resize2fs /dev/vg_data/lv_home # Grow to LV size
resize2fs /dev/sdb1 50G # Shrink to 50G (unmount first!)
# XFS - can only grow, cannot shrink
xfs_growfs /mount/point # Grow to device size
# SWAP
mkswap /dev/sdb2 # Create swap
swapon /dev/sdb2 # Enable swap
swapoff /dev/sdb2 # Disable swap
# Swap file (no partition needed)
# NOTE(review): on btrfs a swapfile needs NOCOW (chattr +C) and no
# compression; if swapon fails, create the file with dd - verify for your fs
fallocate -l 4G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' >> /etc/fstab
# Check swap
swapon --show
free -h
# TMPFS (RAM-based filesystem)
# (contents live in RAM/swap and vanish on reboot)
mount -t tmpfs -o size=2G tmpfs /mnt/ramdisk
# Add to fstab: tmpfs /mnt/ramdisk tmpfs size=2G 0 0
Infrastructure Storage Patterns
# KVM HOST STORAGE (kvm-01)
# CRITICAL: Root is only 14GB - VMs must go to SSD!
# Check where VM images are
# ('images' below is the libvirt storage pool name - adjust if renamed)
virsh pool-list --all
virsh pool-info images
virsh vol-list images
# Correct storage paths
# /dev/sda (root): OS only - 14GB
# /mnt/onboard-ssd/libvirt/images/: All VM disk images
# Find VMs on wrong storage (disk images outside /mnt = on the tiny root disk)
# - read names line-by-line instead of word-splitting $(virsh ...) output
# - pass the VM name to awk with -v: splicing "$vm" into the awk program
#   text breaks on unusual names and is a code-injection risk
virsh list --all --name | while IFS= read -r vm; do
[ -n "$vm" ] || continue # virsh --name output ends with a blank line
virsh domblklist "$vm" 2>/dev/null | awk -v vm="$vm" 'NR>2 && $2 !~ /mnt/ && $2 != "-" {print vm ":", $2}'
done
# NAS BACKUP SHARES (nas-01)
# Backup shares (config snapshots):
# /ise_backups, /wlc_backups, /firewall_backups, /switch_backups
# /kvm_backups, /vault_backups, /borg_backups
# k3s runtime shares (persistent volumes):
# /k3s/prometheus, /k3s/grafana, /k3s/wazuh, /k3s/minio
# Create k3s share via SSH (Synology)
# NOTE(review): the {a,b,c} brace expansion runs on the REMOTE shell -
# confirm the Synology login shell supports it
ssh nas-01 "mkdir -p /volume1/k3s/{prometheus,grafana,minio,argocd,wazuh}"
ssh nas-01 "chown -R 1000:1000 /volume1/k3s"
# Verify NFS exports from k3s node
ssh k3s-master-01 "showmount -e nas-01.inside.domusdigitalis.dev"
# DISK SPACE MONITORING
# Quick check across infrastructure
for host in kvm-01 vault-01 ise-01 bind-01; do
echo "=== $host ==="
ssh "$host" "df -h / /var 2>/dev/null | tail -n +2" 2>/dev/null || echo " (unreachable)"
done
# Alert on >80% usage
# (NR>1 skips the df header; $5+0 coerces "85%" to 85 for numeric compare)
for host in kvm-01 vault-01 ise-01 bind-01 nas-01; do
ssh "$host" "df -h 2>/dev/null" 2>/dev/null | awk -v h="$host" '
NR>1 && $5+0 > 80 {print h ":" $6 " is " $5 " full"}'
done
# LVM ON KVM HOSTS
# Typical layout:
# vg_system: OS logical volumes
# vg_vms: VM storage (thin provisioned)
# (data_percent only populates for thin/snapshot LVs - blank elsewhere)
ssh kvm-01 "vgs; echo '---'; lvs -o lv_name,vg_name,lv_size,data_percent"
# BORG BACKUP REPOSITORY (workstation backups)
# Repository on NAS
export BORG_REPO='ssh://nas-01/volume1/borg_backups/modestus-razer'
# Check backup status
borg list
# Quote the substitution: an archive name containing spaces would otherwise
# word-split into multiple arguments
borg info "::$(borg list --last 1 --format '{archive}')"
# Prune old backups
borg prune --keep-daily=7 --keep-weekly=4 --keep-monthly=6
# VAULT BACKUP (Raft snapshots)
# Automated backup to NAS
# NOTE: $(date ...) expands on the LOCAL machine before ssh runs - fine
# here, since local and remote dates match for a daily filename
ssh vault-01 "vault operator raft snapshot save /tmp/vault-$(date +%Y%m%d).snap"
scp vault-01:/tmp/vault-*.snap /mnt/vault_backups/
Disk Gotchas
# WRONG: Using device names in fstab
# Device names can change between boots!
echo '/dev/sdb1 /data ext4 defaults 0 2' >> /etc/fstab # DON'T
# CORRECT: Use UUID
UUID=$(blkid -s UUID -o value /dev/sdb1)
echo "UUID=$UUID /data ext4 defaults 0 2" >> /etc/fstab
# WRONG: Shrinking XFS
# XFS cannot shrink - only grow!
xfs_growfs /mount/point # Works
# There is no xfs_shrink # Doesn't exist
# CORRECT: If you need smaller XFS
# 1. Backup data
# 2. Create smaller partition/LV
# 3. Format and restore
# WRONG: Running fsck on mounted filesystem
# (repairing a mounted fs can corrupt it)
fsck /dev/sda1 # DANGEROUS if mounted!
# CORRECT: Unmount first, or use -n for read-only
umount /dev/sda1
fsck /dev/sda1
# WRONG: Forgetting -r with lvextend
lvextend -L +10G /dev/vg/lv # Only extends LV
# Filesystem is still old size!
# CORRECT: Extend both at once
lvextend -r -L +10G /dev/vg/lv # -r resizes fs too
# WRONG: Hard NFS mounts without timeout
mount -t nfs4 10.50.1.70:/share /mnt # If server dies, processes hang forever
# CORRECT: Use options
mount -t nfs4 10.50.1.70:/share /mnt -o hard,timeo=600,retrans=2
# WRONG: Not using _netdev for network mounts
# System hangs at boot waiting for network mount before network is up!
# CORRECT: Add _netdev option
echo '10.50.1.70:/share /mnt nfs4 defaults,_netdev,nofail 0 0' >> /etc/fstab
# WRONG: Creating swap on SSD without trim
mkswap /dev/ssd_partition # No TRIM support
# CORRECT: Enable discard for SSD swap
mkswap /dev/ssd_partition
swapon -d /dev/ssd_partition # -d enables discard
# Or in fstab: /dev/ssd swap swap discard 0 0
# WRONG: Thin pool 100% allocated
# VMs start failing with I/O errors!
# CORRECT: Monitor and alert before 80%
# --noheadings drops the header row and $2+0 forces a numeric compare -
# the bare '$2 > 80' matched the "Data%" header and compared percent
# strings lexically, so it could print the header and miss real alerts
lvs --noheadings -o lv_name,data_percent vg_data/tp_data | awk '$2+0 > 80'
Quick Reference
# INSPECTION
lsblk -f # Devices with filesystems
df -hT # Disk usage with types
mount | column -t # Current mounts
findmnt -t nfs4 # NFS mounts
# LVM QUICK
pvs; vgs; lvs # Overview
lvextend -r -L +10G /dev/vg/lv # Extend LV+FS
lvcreate -s -n snap -L 5G /dev/vg/lv # Snapshot
# HEALTH
smartctl -H /dev/sda # SMART health
iostat -x 1 5 # I/O stats (1s interval, 5 samples)
# NFS
showmount -e server # List exports
mount -t nfs4 server:/share /mnt # Mount
# FILESYSTEM
mkfs.ext4 -L label /dev/sdb1 # Create
resize2fs /dev/vg/lv # Grow ext4
xfs_growfs /mount # Grow XFS (mount point)
fsck /dev/sdb1 # Check (unmounted!)
# SPACE ANALYSIS
du -xh --max-depth=2 / 2>/dev/null | sort -hr | head -20
ncdu /var # Interactive
# FSTAB TEMPLATE
# UUID=xxx /mount ext4 defaults,noatime 0 2
# server:/share /mnt nfs4 defaults,_netdev,nofail 0 0