
Wednesday, December 31, 2025

Resize disk with umount


# umount device
sudo umount /archive

# resize part
sudo parted /dev/sdb resizepart 1 100%
# parted may warn that the disk size has changed and offer to fix the GPT table; answer Fix.
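# (Optional, my addition: sgdisk from the gdisk package can relocate the backup
# GPT header to the new end of the disk explicitly.)
# sudo sgdisk -e /dev/sdb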

# resize fs
sudo resize2fs /dev/sdb1

# resize2fs may ask for a filesystem check first; if so, run:
# sudo e2fsck -f /dev/sdb1


sudo mount /dev/sdb1 /archive

# check that the /etc/fstab entry still matches the device
# reboot and verify (see the checks below)
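
A minimal post-resize sanity check (my sketch, assuming the /dev/sdb1 device and /archive mount point from above):

df -h /archive                       # the filesystem should now report the full size
lsblk -o NAME,SIZE,FSTYPE /dev/sdb   # partition size should match the disk size
findmnt --verify                     # syntax-check all /etc/fstab entries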

Resize LVM without umount (Ubuntu 22.04)


user@fnode3:~$ lsblk | tail -n 5
nvme0n1                   259:0    0 238,5G  0 disk
├─nvme0n1p1               259:1    0     1G  0 part  /boot/efi
├─nvme0n1p2               259:2    0     2G  0 part  /boot
└─nvme0n1p3               259:3    0 235,4G  0 part
  └─ubuntu--vg-ubuntu--lv 253:0    0   100G  0 lvm   /

As you can see, the LVM volume doesn't use all of the allocated space:


user@fnode3:~$ sudo lvextend -l +100%FREE /dev/ubuntu-vg/ubuntu-lv

user@fnode3:~$ lsblk | tail -n 5
nvme0n1                   259:0    0 238,5G  0 disk
├─nvme0n1p1               259:1    0     1G  0 part  /boot/efi
├─nvme0n1p2               259:2    0     2G  0 part  /boot
└─nvme0n1p3               259:3    0 235,4G  0 part
  └─ubuntu--vg-ubuntu--lv 253:0    0 235,4G  0 lvm   /

user@fnode3:~$ df -h | grep ubunt
/dev/mapper/ubuntu--vg-ubuntu--lv   98G  8,0G   85G   9% /

The LV is now 235,4G, but the ext4 filesystem still reports the old ~100G size until it is grown online:

user@fnode3:~$ sudo resize2fs /dev/ubuntu-vg/ubuntu-lv
resize2fs 1.46.5 (30-Dec-2021)
Filesystem at /dev/ubuntu-vg/ubuntu-lv is mounted on /; on-line resizing required
old_desc_blocks = 13, new_desc_blocks = 30
The filesystem on /dev/ubuntu-vg/ubuntu-lv is now 61714432 (4k) blocks long.

user@fnode3:~$ df -h
Filesystem                         Size  Used Avail Use% Mounted on
tmpfs                              1,6G  3,1M  1,6G   1% /run
/dev/mapper/ubuntu--vg-ubuntu--lv  232G  8,0G  213G   4% /
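
The extend and the filesystem grow can be combined: lvextend's --resizefs (-r) flag runs the matching filesystem resizer right after extending the LV. A one-liner equivalent of the two steps above:

sudo lvextend -l +100%FREE --resizefs /dev/ubuntu-vg/ubuntu-lv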

Tuesday, December 30, 2025

mdadm: recover an inactive RAID5 and create a new encrypted array



user@vrem:~$ cat /proc/mdstat
Personalities : [raid1] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] [raid10]
md127 : inactive sdd1[4](S) sdf1[2](S) sdc1[0](S) sde1[1](S)
      15627538432 blocks super 1.2



All four members show up as spares (S) and the array is inactive. Comparing the event counters in the superblocks shows two disks lagging behind:

root@vrem:/home/user# mdadm --examine /dev/sd[dcef]1 | grep -i even
         Events : 94226
         Events : 94226
         Events : 94230
         Events : 94230
         
Two of the members are four events behind, which is why the kernel refuses to start the array. Stop it and reassemble with --force so mdadm brings the stale event counts up:

root@vrem:/home/user# mdadm --stop /dev/md127
mdadm: stopped /dev/md127

root@vrem:/home/user# mdadm --assemble --force /dev/md127 /dev/sdc1 /dev/sde1  /dev/sdf1 /dev/sdd1 --verbose
mdadm: looking for devices for /dev/md127
mdadm: /dev/sdc1 is identified as a member of /dev/md127, slot 0.
mdadm: /dev/sde1 is identified as a member of /dev/md127, slot 1.
mdadm: /dev/sdf1 is identified as a member of /dev/md127, slot 2.
mdadm: /dev/sdd1 is identified as a member of /dev/md127, slot 3.
mdadm: forcing event count in /dev/sdc1(0) from 94226 up to 94230
mdadm: forcing event count in /dev/sdd1(3) from 94226 up to 94230
mdadm: added /dev/sde1 to /dev/md127 as 1
mdadm: added /dev/sdf1 to /dev/md127 as 2
mdadm: added /dev/sdd1 to /dev/md127 as 3
mdadm: added /dev/sdc1 to /dev/md127 as 0
mdadm: /dev/md127 has been started with 4 drives.

root@vrem:/home/user# cat /proc/mdstat
Personalities : [raid1] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] [raid10]
md127 : active raid5 sdc1[0] sdd1[4] sdf1[2] sde1[1]
      11720653824 blocks super 1.2 level 5, 512k chunk, algorithm 2 [4/4] [UUUU]
      bitmap: 0/30 pages [0KB], 65536KB chunk

root@vrem:/home/user# cat /etc/mdadm/mdadm.conf
ARRAY /dev/md127 metadata=1.2 name=xen2:2 UUID=40c74af0:6cada18f:aa7bf55f:e7d03937
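
Before relying on the data it may be worth a read-only filesystem check (my addition, not in the original session; it assumes ext4 directly on the array):

fsck -n /dev/md127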

root@vrem:/home/user# mount /dev/md127 /WBACKUP/


Create a new RAID5 array. First partition every disk with fdisk:

sudo fdisk /dev/sda

g - create a new GPT partition table (needed for disks over 2 TB)
n - create a new partition
1 - partition number (1 is the default)
enter - first sector (2048 is the default)
enter - last sector (the end of the disk is the default)
t - change the partition type (optional, but recommended for RAID members)
29 - Linux RAID (press L for the full list of types)
w - write the changes and exit

Repeat these steps for every disk you want in the new array. I used 6 disks to create a RAID5. A scripted alternative with sfdisk is sketched below.
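
The same layout can be scripted with sfdisk instead of interactive fdisk (a sketch, assuming /dev/sda through /dev/sdf are the blank disks meant for the array; the long GUID is the GPT partition type for Linux RAID):

for d in /dev/sd{a..f}; do
  printf 'label: gpt\ntype=A19D880F-05FC-4D3B-A006-743F0F84911E\n' | sudo sfdisk "$d"
done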


sudo mdadm --create --verbose /dev/md0 --level=5 --raid-devices=6 /dev/sd[abcdef]1
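# the initial parity sync can take hours on large disks; watch /proc/mdstat for progress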
sudo cryptsetup --verbose --verify-passphrase --type=luks2 luksFormat /dev/md0
sudo cryptsetup luksOpen /dev/md0 md0_crypt
sudo mkfs.ext4 /dev/mapper/md0_crypt
sudo mkdir /BOX
sudo mount /dev/mapper/md0_crypt /BOX
sudo mdadm --detail --scan | sudo tee -a /etc/mdadm/mdadm.conf
sudo update-initramfs -u
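
The notes stop at the initramfs update. For the encrypted array to be unlocked and mounted at boot, /etc/crypttab and /etc/fstab entries are needed as well (a sketch; <luks-uuid> is a placeholder for the real UUID reported by blkid):

sudo blkid /dev/md0   # copy the LUKS UUID from here
echo 'md0_crypt UUID=<luks-uuid> none luks' | sudo tee -a /etc/crypttab
echo '/dev/mapper/md0_crypt /BOX ext4 defaults 0 2' | sudo tee -a /etc/fstab
sudo update-initramfs -u   # run again so the initramfs picks up crypttab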