root@kvmhost2:~# lvdisplay|more
  --- Logical volume ---
  LV Name                vhd
  VG Name                pve
  LV UUID                1canK4-K2Sd-oJLN-6m3A-i15o-37hL-WfT8xN
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Pool metadata       vhd_tmeta
  LV Pool data           vhd_tdata
  LV Status              NOT available
  LV Size                725.00 GiB
  Current LE             185600
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

  --- Logical volume ---
  LV Path                /dev/pve/vm-100-disk-0
  LV Name                vm-100-disk-0
  VG Name                pve
  LV UUID                c5boqJ-3W4s-yRdc-deTX-lXvi-Fp5N-Hos7wW
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-14 17:25:56 +0200
  LV Pool name           vhd
  LV Status              NOT available
  LV Size                64.19 GiB
  Current LE             16433
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

  --- Logical volume ---
  LV Path                /dev/pve/vm-101-disk-0
  LV Name                vm-101-disk-0
  VG Name                pve
  LV UUID                vdyifN-OM6x-0OTf-olmO-VIpV-Yxmt-8ksWwG
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-20 14:53:00 +0200
  LV Pool name           vhd
  LV Status              NOT available
  LV Size                20.06 GiB
  Current LE             5136
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta0
  LV Name                vhd_meta0
  VG Name                pve
  LV UUID                ePnSFc-4MXf-szYj-pgCe-lOpd-xDwL-v3g2yV
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:0

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta1
  LV Name                vhd_meta1
  VG Name                pve
  LV UUID                Oqv6ME-4ghD-ocC7-jF1c-ZTSJ-qnZk-26SrFo
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:38 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:1

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta2
  LV Name                vhd_meta2
  VG Name                pve
  LV UUID                6j4hc8-CGNI-T9T0-1Jyf-PtNs-fekh-ubjlgJ
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-20 20:12:28 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:2

root@kvmhost2:~# vgchange -a y
  Thin pool pve-vhd-tpool (252:5) transaction_id is 0, while expected 6.
  Thin pool pve-vhd-tpool (252:5) transaction_id is 0, while expected 6.
  Thin pool pve-vhd-tpool (252:5) transaction_id is 0, while expected 6.
  3 logical volume(s) in volume group "pve" now active
root@kvmhost2:~# vgcfgrestore --force -f test1.txt pve
  Volume group pve has active volume: vhd_meta0.
  Volume group pve has active volume: vhd_meta1.
  Volume group pve has active volume: vhd_meta2.
  WARNING: Found 3 active volume(s) in volume group "pve".
  Restoring VG with active LVs, may cause mismatch with its metadata.
Do you really want to proceed with restore of volume group "pve", while 3 volume(s) are active? [y/n]: y
  WARNING: Forced restore of Volume Group pve with thin volumes.
  Restored volume group pve.
root@kvmhost2:~# echo $?
0
root@kvmhost2:~# vgchange -a y
  Thin pool pve-vhd-tpool (252:5) transaction_id is 0, while expected 6.
  Thin pool pve-vhd-tpool (252:5) transaction_id is 0, while expected 6.
  Thin pool pve-vhd-tpool (252:5) transaction_id is 0, while expected 6.
  2 logical volume(s) in volume group "pve" now active
root@kvmhost2:~# lvconvert --repair /dev/pve/vhd
  Transaction id 6 from pool "pve/vhd" does not match repaired transaction id 0 from /dev/mapper/pve-lvol2_pmspare.
  WARNING: LV pve/vhd_meta2 holds a backup of the unrepaired metadata.
  Use lvremove when no longer required.
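For diagnosis: the repair keeps failing because the two sides disagree about the thin pool's transaction id. The LVM metadata (and the vgcfgbackup text file, see the transaction_id line in the diff further below) expects 6, while the superblock inside the pool metadata device says 0. With the thin-provisioning-tools package installed, the superblock of a metadata copy such as the left-over vhd_meta2 LV can be inspected directly while it is active; a sketch, with the output line quoted from memory rather than from this session:

  thin_dump /dev/pve/vhd_meta2 | head -n 1
  <superblock uuid="" time="0" transaction="0" data_block_size="1024" ...>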
root@kvmhost2:~# echo $?
0
root@kvmhost2:~# cp -p test1.txt test1a.txt
root@kvmhost2:~# nano test1a.txt
root@kvmhost2:~# diff test1.txt test1a.txt
53c53
< transaction_id = 6
---
> transaction_id = 0
root@kvmhost2:~# vgcfgrestore --force -f test1a.txt pve
  Volume group pve has active volume: vhd_meta0.
  Volume group pve has active volume: vhd_meta1.
  Volume group pve has active volume: vhd_meta2.
  WARNING: Found 3 active volume(s) in volume group "pve".
  Restoring VG with active LVs, may cause mismatch with its metadata.
Do you really want to proceed with restore of volume group "pve", while 3 volume(s) are active? [y/n]: y
  WARNING: Forced restore of Volume Group pve with thin volumes.
  Restored volume group pve.
root@kvmhost2:~# vgchange -a y
  device-mapper: reload ioctl on (252:6) failed: No data available
  device-mapper: reload ioctl on (252:6) failed: No data available
  3 logical volume(s) in volume group "pve" now active
root@kvmhost2:~# lslblk
-bash: lslblk: command not found
root@kvmhost2:~# lsblk
NAME                  MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
fd0                     2:0    1     4K  0 disk
sda                     8:0    0 931.5G  0 disk
├─md126                 9:126  0 931.5G  0 raid1
│ ├─md126p1           259:0    0 133.3M  0 part  /dos
│ ├─md126p2           259:1    0   800M  0 part  /boot
│ ├─md126p3           259:2    0     1K  0 part
│ ├─md126p5           259:3    0    15G  0 part  /
│ ├─md126p6           259:4    0   150G  0 part  /home
│ ├─md126p7           259:5    0    10G  0 part  /tmp
│ ├─md126p8           259:6    0     8G  0 part  [SWAP]
│ ├─md126p9           259:7    0    20G  0 part  /var
│ └─md126p10          259:8    0 727.6G  0 part
│   ├─pve-vhd_meta0   252:0    0    92M  0 lvm
│   ├─pve-vhd_meta1   252:1    0    92M  0 lvm
│   ├─pve-vhd_meta2   252:2    0    92M  0 lvm
│   ├─pve-vhd_tmeta   252:3    0    92M  0 lvm
│   │ └─pve-vhd       252:5    0   725G  0 lvm
│   └─pve-vhd_tdata   252:4    0   725G  0 lvm
│     └─pve-vhd       252:5    0   725G  0 lvm
└─md127                 9:127  0     0B  0 md
sdb                     8:16   0 931.5G  0 disk
├─md126                 [subtree identical to the one under sda]
└─md127                 9:127  0     0B  0 md
sdc                     8:32   0   1.8T  0 disk
└─sdc1                  8:33   0   1.8T  0 part
sr0                    11:0    1  1024M  0 rom
root@kvmhost2:~# blkid
/dev/sdb: TYPE="isw_raid_member"
/dev/sdc1: LABEL="CLONEZILLA_2_UNG_MT" BLOCK_SIZE="512" UUID="9A50FC0950FBE9C1" TYPE="ntfs" PARTUUID="f4529c2d-01"
/dev/md126p2: LABEL="BOOT" UUID="aee27756-d3a8-4fd9-8744-6ba1de309c25" BLOCK_SIZE="4096" TYPE="ext4"
/dev/md126p10: UUID="0EUgL1-sDg7-Y5B5-rk7M-RfiU-vbDf-mzsI5b" TYPE="LVM2_member"
/dev/md126p9: LABEL="VAR" UUID="5c8dfc8a-18d4-4c42-904f-eb2a22c48753" BLOCK_SIZE="4096" TYPE="ext4"
/dev/md126p7: LABEL="TMP" UUID="65f021e0-d96c-4dd8-a23b-2f29d98516a3" BLOCK_SIZE="4096" TYPE="ext4"
/dev/md126p5: LABEL="ROOTFS" UUID="274a6b75-baef-4704-8318-80ee8c3e7967" BLOCK_SIZE="4096" TYPE="ext4"
/dev/md126p1: SEC_TYPE="msdos" LABEL_FATBOOT="MS-DOS_6" LABEL="MS-DOS_6" UUID="58CE-8254" BLOCK_SIZE="512" TYPE="vfat"
/dev/md126p8: UUID="544cea28-a048-4b96-9a17-e971ad1b501a" TYPE="swap"
/dev/md126p6: LABEL="HOME" UUID="ebb47371-3ee5-40db-928f-74e2bf98a2b1" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sda: TYPE="isw_raid_member"
root@kvmhost2:~# lsblk
  [output identical to the previous lsblk]
root@kvmhost2:~# lvdisplay
  --- Logical volume ---
  LV Name                vhd
  VG Name                pve
  LV UUID                1canK4-K2Sd-oJLN-6m3A-i15o-37hL-WfT8xN
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Pool metadata       vhd_tmeta
  LV Pool data           vhd_tdata
  LV Status              available
  # open                 0
  LV Size                725.00 GiB
  Allocated pool data    0.00%
  Allocated metadata     10.42%
  Current LE             185600
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     2048
  Block device           252:5

  --- Logical volume ---
  LV Path                /dev/pve/vm-100-disk-0
  LV Name                vm-100-disk-0
  VG Name                pve
  LV UUID                c5boqJ-3W4s-yRdc-deTX-lXvi-Fp5N-Hos7wW
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-14 17:25:56 +0200
  LV Pool name           vhd
  LV Status              NOT available
  LV Size                64.19 GiB
  Current LE             16433
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

  --- Logical volume ---
  LV Path                /dev/pve/vm-101-disk-0
  LV Name                vm-101-disk-0
  VG Name                pve
  LV UUID                vdyifN-OM6x-0OTf-olmO-VIpV-Yxmt-8ksWwG
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-20 14:53:00 +0200
  LV Pool name           vhd
  LV Status              NOT available
  LV Size                20.06 GiB
  Current LE             5136
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta0
  LV Name                vhd_meta0
  VG Name                pve
  LV UUID                ePnSFc-4MXf-szYj-pgCe-lOpd-xDwL-v3g2yV
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:0

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta1
  LV Name                vhd_meta1
  VG Name                pve
  LV UUID                Oqv6ME-4ghD-ocC7-jF1c-ZTSJ-qnZk-26SrFo
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:38 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:1

root@kvmhost2:~# lvdisplay
  [output identical to the previous lvdisplay]
root@kvmhost2:~# service proxmox-
proxmox-boot-cleanup  proxmox-firewall
root@kvmhost2:~# service proxmox-
proxmox-boot-cleanup  proxmox-firewall
root@kvmhost2:~# service k
keyboard-setup     keyboard-setup.sh  kmod               kmod-static-nodes
root@kvmhost2:~# lsof -i :8006
COMMAND      PID     USER   FD   TYPE DEVICE SIZE/OFF NODE NAME
pveproxy    1271 www-data    6u  IPv6   7826      0t0  TCP *:8006 (LISTEN)
pveproxy 2704469 www-data    6u  IPv6   7826      0t0  TCP *:8006 (LISTEN)
pveproxy 2711375 www-data    6u  IPv6   7826      0t0  TCP *:8006 (LISTEN)
pveproxy 2711841 www-data    6u  IPv6   7826      0t0  TCP *:8006 (LISTEN)
root@kvmhost2:~# service pve
pvebanner         pve-firewall      pve-ha-lrm        pvescheduler
pve-cluster       pvefw-logger      pve-lxc-syscalld  pvestatd
pvedaemon         pve-guests        pvenetcommit      pve-daily-update
pve-ha-crm        pveproxy
root@kvmhost2:~# service pve
  [same completion list as above]
root@kvmhost2:~# lvconvert --repair /dev/pve/vhd
  Active pools cannot be repaired. Use lvchange -an first.
root@kvmhost2:~# lvchange -an
  No command with matching syntax recognised.  Run 'lvchange --help' for more information.
  Nearest similar command has syntax:
  lvchange -a|--activate y|n|ay VG|LV|Tag|Select ...
  Activate or deactivate an LV.
root@kvmhost2:~# lvchange -a n
  [same "No command with matching syntax recognised" message]
root@kvmhost2:~# man lvchange
root@kvmhost2:~# man lvchange
root@kvmhost2:~# lvconvert --repair /dev/pve/vhd
  Active pools cannot be repaired. Use lvchange -an first.
root@kvmhost2:~# lvchange -an
  [same "No command with matching syntax recognised" message]
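The two rejected calls are only missing their target: -an and -a n are both accepted spellings of the activation argument, but lvchange additionally needs a VG or LV name. Deactivating just the pool should be enough for the repair to be allowed, roughly:

  lvchange -an pve/vhd
  lvconvert --repair pve/vhd

If one of the Proxmox services (pvestatd, pvedaemon) immediately re-activates the pool, it may also be necessary to stop those first.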
root@kvmhost2:~# lvchange -an pve
root@kvmhost2:~# lvconvert --repair /dev/pve/vhd
  Active pools cannot be repaired. Use lvchange -an first.
root@kvmhost2:~# lvdisplay
  --- Logical volume ---
  LV Name                vhd
  VG Name                pve
  LV UUID                1canK4-K2Sd-oJLN-6m3A-i15o-37hL-WfT8xN
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Pool metadata       vhd_tmeta
  LV Pool data           vhd_tdata
  LV Status              available
  # open                 0
  LV Size                725.00 GiB
  Allocated pool data    0.00%
  Allocated metadata     10.42%
  Current LE             185600
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     2048
  Block device           252:3

  [vm-100-disk-0 and vm-101-disk-0 blocks unchanged: LV Status NOT available]

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta0
  LV Name                vhd_meta0
  VG Name                pve
  LV UUID                ePnSFc-4MXf-szYj-pgCe-lOpd-xDwL-v3g2yV
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Status              NOT available
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta1
  LV Name                vhd_meta1
  VG Name                pve
  LV UUID                Oqv6ME-4ghD-ocC7-jF1c-ZTSJ-qnZk-26SrFo
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:38 +0200
  LV Status              NOT available
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto

root@kvmhost2:~# lvchange -ay
  No command with matching syntax recognised.  Run 'lvchange --help' for more information.
  Nearest similar command has syntax:
  lvchange -a|--activate y|n|ay VG|LV|Tag|Select ...
  Activate or deactivate an LV.
root@kvmhost2:~# lvchange -a y
  [same "No command with matching syntax recognised" message]
root@kvmhost2:~# lvchange -a y pve
  device-mapper: reload ioctl on (252:4) failed: No data available
  device-mapper: reload ioctl on (252:4) failed: No data available
root@kvmhost2:~# shutdown -r now

login as: dreael
dreael@172.29.63.11's password:
Linux kvmhost2 6.8.4-3-pve #1 SMP PREEMPT_DYNAMIC PMX 6.8.4-3 (2024-05-02T11:55Z) x86_64

The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
Last login: Thu Jun 20 20:22:02 2024 from 172.29.63.154
dreael@kvmhost2:~$ su -
Password:
root@kvmhost2:~# lvdisplay
  --- Logical volume ---
  LV Name                vhd
  VG Name                pve
  LV UUID                1canK4-K2Sd-oJLN-6m3A-i15o-37hL-WfT8xN
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Pool metadata       vhd_tmeta
  LV Pool data           vhd_tdata
  LV Status              available
  # open                 0
  LV Size                725.00 GiB
  Allocated pool data    0.00%
  Allocated metadata     10.42%
  Current LE             185600
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     2048
  Block device           252:2

  [vm-100-disk-0 and vm-101-disk-0 blocks unchanged: LV Status NOT available]

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta0
  LV Name                vhd_meta0
  VG Name                pve
  LV UUID                ePnSFc-4MXf-szYj-pgCe-lOpd-xDwL-v3g2yV
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:39 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:3

  --- Logical volume ---
  LV Path                /dev/pve/vhd_meta1
  LV Name                vhd_meta1
  VG Name                pve
  LV UUID                Oqv6ME-4ghD-ocC7-jF1c-ZTSJ-qnZk-26SrFo
  LV Write Access        read/write
  LV Creation host, time debian, 2024-06-14 18:30:38 +0200
  LV Status              available
  # open                 0
  LV Size                92.00 MiB
  Current LE             23
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     256
  Block device           252:4

root@kvmhost2:~# lvconvert --repair /dev/pve/vhd
  Active pools cannot be repaired. Use lvchange -an first.
root@kvmhost2:~# vgchange -a y
  device-mapper: reload ioctl on (252:5) failed: No data available
  device-mapper: reload ioctl on (252:5) failed: No data available
  3 logical volume(s) in volume group "pve" now active
root@kvmhost2:~# history
    1  nano /etc/network/interfaces
    2  lsblk
    3  shutdown -r now
    4  lvdisplay
    5  vgdisplay
    6  lsblk
    7  fdisk -l /dev/md126
    8  nano /etc/hosts
    9  ping kvmhost2
   10  echo "deb [arch=amd64] http://download.proxmox.com/debian/pve bookworm pve-no-subscription" > /etc/apt/sources.list.d/pve-install-repo.list
   11  wget https://enterprise.proxmox.com/debian/proxmox-release-bookworm.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
   12  sha512sum /etc/apt/trusted.gpg.d/proxmox-release-bookworm.gpg
   13  apt-get clean
   14  apt-get update
   15  apt-get upgrade
   16  apt-get dist-upgrade
   17  apt-get install proxmox-default-kernel
   18  shutdown -r now
   19  apt-get install proxmox-ve postfix open-iscsi chrony
   20  echo $?
   21  apt remove linux-image-amd64 'linux-image-6.1*'
   22  apt remove os-prober
   23  update-grub
   24  shutdown -r now
   25  nano /etc/network/interfaces
   26  shutdown -r now
   27  cd /home/isos/template/iso
   28  chown dreael:dreael .
   29  ls -al
   30  exit
   31  shutdown -h now
   32  lsblk
   33  shutdown -r now
   34  vgck pve
   35  vgck -v pve
   36  vgck pve -v
   37  man vgck
   38  vgck --reportformat basic pve
   39  ls -alt|more
   40  more .lesshst
   41  journalctl|tail -n 20
   42  vgdisplay
   43  lvdisplay
   44  journalctl|tail -n 20
   45  lvdisplay
   46  lvconvert --repair /dev/pve/vhd
   47  vgck /dev/pve/vhd
   48  vgck pve
   49  lvdisplay
   50  vgchange -a
   51  vgchange -ay
   52  lvdisplay
   53  lvconvert --repair /dev/pve/vm-100-disk-0
   54  man lvconvert
   55  man vgchange
   56  vgck pve
   57  vgck -v pve
   58  lvconvert --repair /dev/pve/vhd
   59  echo $?
   60  vgchange -a pve
   61  vgchange pve -a
   62  vgchange pve -ay
   63  lvdisplay
   64  vgcfgbackup pve -f test1.txt
   65  more test1.txt
   66  vgcfgrestore -f test1.txt
   67  vgcfgrestore -f test1.txt pve
   68  dmsetup remove_all
   69  lsvg
   70  vgdisplay
   71  lvdisplay
   72  vgcfgrestore -f test1.txt pve
   73  vgcfgrestore --force -f test1.txt pve
   74  lvdisplay
   75  more test1.txt
   76  shutdown -r now
   77  lvdisplay
   78  vgck pve
   79  pvs
   80  lvs
   81  lvconvert --repair /dev/pve/vhd
   82  lvremove /dev/pve/vhd_meta2
   83  lvconvert --repair /dev/pve/vhd
   84  vgchange -ay pve
   85  lvdisplay
   86  lvdisplay
   87  lvdisplay|more
   88  lvdisplay|more
   89  vgchange -a y
   90  vgcfgrestore --force -f test1.txt pve
   91  echo $?
   92  vgchange -a y
   93  lvconvert --repair /dev/pve/vhd
   94  echo $?
   95  cp -p test1.txt test1a.txt
   96  nano test1a.txt
   97  nano test1a.txt
   98  vgcfgrestore --force -f test1a.txt pve
   99  vgchange -a y
  100  lslblk
  101  lsblk
  102  blkid
  103  lsblk
  104  lvdisplay
  105  lvdisplay
  106  lsof -i :8006
  107  lvconvert --repair /dev/pve/vhd
  108  lvchange -an
  109  lvchange -a n
  110  man lvchange
  111  man lvchange
  112  lvconvert --repair /dev/pve/vhd
  113  lvchange -an
  114  lvchange -an pve
  115  lvconvert --repair /dev/pve/vhd
  116  lvdisplay
  117  lvchange -ay
  118  lvchange -a y
  119  lvchange -a y pve
  120  shutdown -r now
  121  lvdisplay
  122  lvconvert --repair /dev/pve/vhd
  123  vgchange -a y
  124  history
root@kvmhost2:~# vgck /dev/pve/vhd
  Invalid volume group name pve/vhd.
  Run `vgck --help' for more information.
root@kvmhost2:~# vgck pve
root@kvmhost2:~# lvchange -a y pve
  device-mapper: reload ioctl on (252:5) failed: No data available
  device-mapper: reload ioctl on (252:5) failed: No data available
root@kvmhost2:~# lvs -a
  LV              VG  Attr       LSize   Pool Origin Data%  Meta%  Move Log Cpy%Sync Convert
  [lvol2_pmspare] pve ewi-------  92.00m
  vhd             pve twi-a-tz-- 725.00g             0.00   10.42
  vhd_meta0       pve -wi-a-----  92.00m
  vhd_meta1       pve -wi-a-----  92.00m
  [vhd_tdata]     pve Twi-ao---- 725.00g
  [vhd_tmeta]     pve ewi-ao----  92.00m
  vm-100-disk-0   pve Vwi---tz--  64.19g vhd
  vm-101-disk-0   pve Vwi---tz--  20.06g vhd
root@kvmhost2:~# _

==============================================================
Since the restore did not succeed, deleting the complete LVM...
==============================================================

root@kvmhost2:~# lvremove pve vm-100-disk-0
Removing pool pve/vhd will remove 2 dependent volume(s). Proceed? [y/n]: y
  Logical volume "vm-100-disk-0" successfully removed.
  Logical volume "vm-101-disk-0" successfully removed.
  Logical volume "vhd" successfully removed.
Do you really want to remove active logical volume pve/vhd_meta0? [y/n]: y
  Logical volume "vhd_meta0" successfully removed.
Do you really want to remove active logical volume pve/vhd_meta1? [y/n]: y
  Logical volume "vhd_meta1" successfully removed.
  Volume group "vm-100-disk-0" not found
  Cannot process volume group vm-100-disk-0
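A side note on the lvremove syntax used here: the positional arguments are independent VG or VG/LV selectors. "lvremove pve vm-100-disk-0" therefore first selects the whole volume group pve, which removes the pool together with both thin volumes in one sweep, and then tries to interpret vm-100-disk-0 as a second volume group, hence the "not found" error. Removing a single thin volume needs the VG/LV form:

  lvremove pve/vm-100-disk-0

In this situation the complete wipe was the intention anyway, so the error was harmless.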
root@kvmhost2:~# lvremove pve vm-101-disk-0
  Volume group "vm-101-disk-0" not found
  Cannot process volume group vm-101-disk-0
root@kvmhost2:~# lvdisplay
root@kvmhost2:~# vgdisplay
  --- Volume group ---
  VG Name               pve
  System ID
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  28
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               <727.59 GiB
  PE Size               4.00 MiB
  Total PE              186263
  Alloc PE / Size       0 / 0
  Free  PE / Size       186263 / <727.59 GiB
  VG UUID               NdHTpB-v7Jm-eor4-gYu9-Hiox-F6ce-IX3rok

root@kvmhost2:~# vgremove pve
  Volume group "pve" successfully removed
root@kvmhost2:~# pvremove /dev/md126p10
  Labels on physical volume "/dev/md126p10" successfully wiped.
root@kvmhost2:~# shutdown -r now

==============================
... and rebuilding it manually
==============================

login as: dreael
dreael@172.29.63.11's password:
Linux kvmhost2 6.8.4-3-pve #1 SMP PREEMPT_DYNAMIC PMX 6.8.4-3 (2024-05-02T11:55Z) x86_64

The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
Last login: Tue Jun 25 14:51:03 2024 from 172.29.63.154
dreael@kvmhost2:~$ su -
Password:
root@kvmhost2:~# fdisk /ll

Welcome to fdisk (util-linux 2.38.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

fdisk: cannot open /ll: No such file or directory
root@kvmhost2:~# fdisk /l

Welcome to fdisk (util-linux 2.38.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

fdisk: cannot open /l: No such file or directory
root@kvmhost2:~# fdisk /l

Welcome to fdisk (util-linux 2.38.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

fdisk: cannot open /l: No such file or directory
root@kvmhost2:~# fdisk -l
Disk /dev/sda: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
Disk model: Samsung SSD 870
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x00000000

Device     Boot      Start        End    Sectors   Size Id Type
/dev/sda1  *            63     273104     273042 133.3M  6 FAT16
/dev/sda2           274432    1912831    1638400   800M 83 Linux
/dev/sda3          1912832 1953519615 1951606784 930.6G  f W95 Ext'd (LBA)
/dev/sda5          1914880   33372159   31457280    15G 83 Linux
/dev/sda6         33374208  347947007  314572800   150G 83 Linux
/dev/sda7        347949056  368920575   20971520    10G 83 Linux
/dev/sda8        368922624  385699839   16777216     8G 82 Linux swap / Solaris
/dev/sda9        385701888  427644927   41943040    20G 83 Linux
/dev/sda10       427646976 1953519615 1525872640 727.6G 8e Linux LVM

Disk /dev/sdb: 931.51 GiB, 1000204886016 bytes, 1953525168 sectors
Disk model: Samsung SSD 870
  [partition table identical to /dev/sda]

Disk /dev/md126: 931.51 GiB, 1000202043392 bytes, 1953519616 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x00000000
  [partition table identical to /dev/sda, with md126p* device names]

Disk /dev/sdc: 1.82 TiB, 2000398933504 bytes, 3907029167 sectors
Disk model: External
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0xf4529c2d

Device     Boot Start        End    Sectors  Size Id Type
/dev/sdc1        2048 3907026943 3907024896  1.8T  7 HPFS/NTFS/exFAT

root@kvmhost2:~# pvcreate /dev/md126p10
  Physical volume "/dev/md126p10" successfully created.
root@kvmhost2:~# vgcreate /dev/md126p10 pve
  /dev/md126p10: already exists in filesystem
  Run `vgcreate --help' for more information.
root@kvmhost2:~# vgcreate pve /dev/md126p10
  Volume group "pve" successfully created
root@kvmhost2:~# lsvg
-bash: lsvg: command not found
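lsvg is the AIX LVM command; with Linux LVM2 the counterparts are the report tools:

  vgs      # one summary line per volume group
  lvs -a   # -a also shows hidden internal LVs such as [vhd_tmeta]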
root@kvmhost2:~# vgdisplay
  --- Volume group ---
  VG Name               pve
  System ID
  Format                lvm2
  Metadata Areas        1
  Metadata Sequence No  1
  VG Access             read/write
  VG Status             resizable
  MAX LV                0
  Cur LV                0
  Open LV               0
  Max PV                0
  Cur PV                1
  Act PV                1
  VG Size               <727.59 GiB
  PE Size               4.00 MiB
  Total PE              186263
  Alloc PE / Size       0 / 0
  Free  PE / Size       186263 / <727.59 GiB
  VG UUID               9GPX0C-cHPz-H8L5-nTgs-1Cnc-RpAE-hmBLI6

root@kvmhost2:~# lvcreate --type thin-pool -L 727G -n vhd pve
  Thin pool volume with chunk size 512.00 KiB can address at most 127.00 TiB of data.
  WARNING: Pool zeroing and 512.00 KiB large chunk size slows down thin provisioning.
  WARNING: Consider disabling zeroing (-Zn) or using smaller chunk size (<512.00 KiB).
  Logical volume "vhd" created.
root@kvmhost2:~# lvcreate -V 64.2G -n vm-100-disk-0 --thinpool vhd pve
  Rounding up size to full physical extent 64.20 GiB
  Logical volume "vm-100-disk-0" created.
root@kvmhost2:~# lvcreate -V 20.07G -n vm-101-disk-0 --thinpool vhd pve
  Rounding up size to full physical extent 20.07 GiB
  Logical volume "vm-101-disk-0" created.
root@kvmhost2:~# lsblk
NAME                            MAJ:MIN RM   SIZE RO TYPE  MOUNTPOINTS
fd0                               2:0    1     4K  0 disk
sda                               8:0    0 931.5G  0 disk
├─md126                           9:126  0 931.5G  0 raid1
│ ├─md126p1                     259:0    0 133.3M  0 part  /dos
│ ├─md126p2                     259:1    0   800M  0 part  /boot
│ ├─md126p3                     259:2    0     1K  0 part
│ ├─md126p5                     259:3    0    15G  0 part  /
│ ├─md126p6                     259:4    0   150G  0 part  /home
│ ├─md126p7                     259:5    0    10G  0 part  /tmp
│ ├─md126p8                     259:6    0     8G  0 part  [SWAP]
│ ├─md126p9                     259:7    0    20G  0 part  /var
│ └─md126p10                    259:8    0 727.6G  0 part
│   ├─pve-vhd_tmeta             252:0    0    92M  0 lvm
│   │ └─pve-vhd-tpool           252:2    0   727G  0 lvm
│   │   ├─pve-vhd               252:3    0   727G  1 lvm
│   │   ├─pve-vm--100--disk--0  252:4    0  64.2G  0 lvm
│   │   └─pve-vm--101--disk--0  252:5    0  20.1G  0 lvm
│   └─pve-vhd_tdata             252:1    0   727G  0 lvm
│     └─pve-vhd-tpool           252:2    0   727G  0 lvm
│       ├─pve-vhd               252:3    0   727G  1 lvm
│       ├─pve-vm--100--disk--0  252:4    0  64.2G  0 lvm
│       └─pve-vm--101--disk--0  252:5    0  20.1G  0 lvm
└─md127                           9:127  0     0B  0 md
sdb                               8:16   0 931.5G  0 disk
├─md126                           [subtree identical to the one under sda]
└─md127                           9:127  0     0B  0 md
sdc                               8:32   0   1.8T  0 disk
└─sdc1                            8:33   0   1.8T  0 part
sr0                              11:0    1  1024M  0 rom
root@kvmhost2:~# ls /dev/mapper/
control               pve-vhd_tmeta  pve-vm--101--disk--0
pve-vhd               pve-vhd-tpool  pve-vhd_tdata
pve-vm--100--disk--0
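The two WARNINGs could be addressed at creation time. A variant of the same pool creation, only as a sketch: -Zn disables zeroing of newly provisioned chunks (faster, but freshly allocated blocks may expose old data to a guest), and a smaller chunk size reduces the write overhead the warning hints at:

  lvcreate --type thin-pool -L 727G --chunksize 256k -Zn -n vhd pve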
root@kvmhost2:~# mount /dev/sdc1 /mnt
root@kvmhost2:~# cd /mnt
root@kvmhost2:/mnt# ls
 2024-05-27-14-img_KVM1_Polen_Win10_Test
 2024-05-30-16-img_KVM2_Polen_Win10_Test
 2024-06-10-14-img_KVM1_Deb_KVM_mit_LVM
 2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool
'System Volume Information'
root@kvmhost2:/mnt# cd 2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool
root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# ls
blkdev.list            md126-hidden-data-after-mbr
blkid.list             md126-mbr
clonezilla-img         md126p1.vfat-ptcl-img.zst
dev-fs.list            md126p2.ext4-ptcl-img.zst
disk                   md126p3-ebr
dmraid.table           md126p5.ext4-ptcl-img.zst
Info-dmi.txt           md126p6.ext4-ptcl-img.zst
Info-img-id.txt        md126p7.ext4-ptcl-img.zst
Info-img-size.txt      md126p9.ext4-ptcl-img.zst
Info-lshw.txt          md126-pt.parted
Info-lspci.txt         md126-pt.parted.compact
Info-OS-prober.txt     md126-pt.sf
Info-packages.txt      md126.txt
Info-saved-by-cmd.txt  mdadm.conf
Info-smart.txt         mdstat.txt
lvm_logv.list          parts
lvm_pve.conf           pve-vm-100-disk-0.dd-ptcl-img.zst
lvm_vg_dev.list        pve-vm-101-disk-0.dd-ptcl-img.zst
md126-chs.sf           swappt-md126p8.info
root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# unzstd /dev/mapper/pve-vm--100--disk--0
root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# unzstd /dev/mapper/pve-vm--101--disk--0
root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# lvdisplay
  --- Logical volume ---
  LV Name                vhd
  VG Name                pve
  LV UUID                Yt3vl7-6dr5-8oKm-obca-BUZZ-0e3W-tohG3q
  LV Write Access        read/write (activated read only)
  LV Creation host, time kvmhost2, 2024-06-25 15:10:57 +0200
  LV Pool metadata       vhd_tmeta
  LV Pool data           vhd_tdata
  LV Status              available
  # open                 0
  LV Size                727.00 GiB
  Allocated pool data    11.55%
  Allocated metadata     13.40%
  Current LE             186112
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     2048
  Block device           252:3

  --- Logical volume ---
  LV Path                /dev/pve/vm-100-disk-0
  LV Name                vm-100-disk-0
  VG Name                pve
  LV UUID                yUPdHB-NJde-Wu3L-CW9P-YJIj-Ts0Y-PWIGCf
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-25 15:13:24 +0200
  LV Pool name           vhd
  LV Status              available
  # open                 0
  LV Size                64.20 GiB
  Mapped size            99.68%
  Current LE             16436
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     2048
  Block device           252:4

  --- Logical volume ---
  LV Path                /dev/pve/vm-101-disk-0
  LV Name                vm-101-disk-0
  VG Name                pve
  LV UUID                rHc97r-pUKc-8pST-nGTM-PaT2-Hmli-ahXd1v
  LV Write Access        read/write
  LV Creation host, time kvmhost2, 2024-06-25 15:14:08 +0200
  LV Pool name           vhd
  LV Status              available
  # open                 0
  LV Size                20.07 GiB
  Mapped size            99.65%
  Current LE             5138
  Segments               1
  Allocation             inherit
  Read ahead sectors     auto
  - currently set to     2048
  Block device           252:5

root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# fdisk -l /dev/mapper/pve-vm--100--disk--0
Disk /dev/mapper/pve-vm--100--disk--0: 64.2 GiB, 68937580544 bytes, 134643712 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 524288 bytes / 524288 bytes
Disklabel type: dos
Disk identifier: 0xf1edd535

Device                                 Boot     Start       End   Sectors  Size Id Type
/dev/mapper/pve-vm--100--disk--0-part1 *         2048    104447    102400   50M  7 HPFS
/dev/mapper/pve-vm--100--disk--0-part2         104448 133062121 132957674 63.4G  7 HPFS
/dev/mapper/pve-vm--100--disk--0-part3      133062656 134213631   1150976  562M 27 Hidd

root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# fdisk -l /dev/mapper/pve-vm--101--disk--0
Disk /dev/mapper/pve-vm--101--disk--0: 20.07 GiB, 21550333952 bytes, 42090496 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 524288 bytes / 524288 bytes
Disklabel type: dos
Disk identifier: 0xd171d171

Device                                 Boot Start      End  Sectors Size Id Type
/dev/mapper/pve-vm--101--disk--0-part1 *       63 41913584 41913522  20G  7 HPFS
Partition 1 does not start on physical sector boundary.

root@kvmhost2:/mnt/2024-06-20-15-img_KVM2_Proxmox_LVM_nurThinPool# shutdown -r now

Proxmox accepted the rebuilt VMs, i.e. they could be started.
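The two unzstd calls above have apparently lost their input arguments in the copy-paste. Judging by the file names in the directory listing, each call decompressed a Clonezilla dd-mode image directly onto the matching thin volume. Assuming unsplit single-file images, the full commands would plausibly have looked like this (reconstructed, not taken from the session):

  unzstd -c pve-vm-100-disk-0.dd-ptcl-img.zst > /dev/mapper/pve-vm--100--disk--0
  unzstd -c pve-vm-101-disk-0.dd-ptcl-img.zst > /dev/mapper/pve-vm--101--disk--0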
A drawback of this approach: the dd-style restore writes every sector, so the thin volumes end up fully allocated (see the Mapped size of ~99.7% above). In this example with the Windows 10 guest, a "defrag /l c:" (retrim) therefore has to be run inside the guest afterwards, so that the unused blocks are discarded again. The approach was possible at all only because Clonezilla had saved every virtual hard drive inside the thin pool as its own block device image.

Sparse restore:

root@kvmhost2:~# mount /dev/sdc1 /mnt
root@kvmhost2:~# unzstd
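Whichever restore variant is used, whether a retrim inside the guest really returns space to the pool can be watched from the host side; assuming the virtual disk is attached with the discard option enabled in Proxmox, the Data% figure should drop after the trim:

  lvs -o lv_name,data_percent pve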