diff --git a/core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java b/core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java
new file mode 100644
index 000000000000..24fdf8402029
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java
@@ -0,0 +1,42 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+/**
+ * Answer for PostMigrationCommand.
+ * Indicates success or failure of post-migration operations on the destination host.
+ */
+public class PostMigrationAnswer extends Answer {
+
+ protected PostMigrationAnswer() {
+ }
+
+ public PostMigrationAnswer(PostMigrationCommand cmd, String detail) {
+ super(cmd, false, detail);
+ }
+
+ public PostMigrationAnswer(PostMigrationCommand cmd, Exception ex) {
+ super(cmd, ex);
+ }
+
+ public PostMigrationAnswer(PostMigrationCommand cmd) {
+ super(cmd, true, null);
+ }
+}
diff --git a/core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java b/core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java
new file mode 100644
index 000000000000..e32e6eacb344
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java
@@ -0,0 +1,54 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import com.cloud.agent.api.to.VirtualMachineTO;
+
+/**
+ * PostMigrationCommand is sent to the destination host after a successful VM migration.
+ * It performs post-migration tasks such as:
+ * - Claiming exclusive locks on CLVM volumes (converting from shared to exclusive mode)
+ * - Other post-migration cleanup operations
+ */
+public class PostMigrationCommand extends Command {
+ private VirtualMachineTO vm;
+ private String vmName;
+
+ protected PostMigrationCommand() {
+ }
+
+ public PostMigrationCommand(VirtualMachineTO vm, String vmName) {
+ this.vm = vm;
+ this.vmName = vmName;
+ }
+
+ public VirtualMachineTO getVirtualMachine() {
+ return vm;
+ }
+
+ public String getVmName() {
+ return vmName;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+ return true;
+ }
+}
diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java b/core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java
new file mode 100644
index 000000000000..7d71ba78509b
--- /dev/null
+++ b/core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java
@@ -0,0 +1,97 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.command;
+
+import com.cloud.agent.api.Command;
+
+/**
+ * Command to transfer CLVM (Clustered LVM) exclusive lock between hosts.
+ * This enables lightweight volume migration for CLVM storage pools where volumes
+ * reside in the same Volume Group (VG) but need to be accessed from different hosts.
+ *
+ * <p>Instead of copying volume data (traditional migration), this command simply
+ * deactivates the LV on the source host and activates it exclusively on the destination host.
+ *
+ * <p>This is significantly faster (10-100x) than traditional migration and uses no network bandwidth.
+ */
+public class ClvmLockTransferCommand extends Command {
+
+ /**
+ * Operation to perform on the CLVM volume.
+ * Maps to lvchange flags for LVM operations.
+ */
+ public enum Operation {
+ /** Deactivate the volume on this host (-an) */
+ DEACTIVATE("-an", "deactivate"),
+
+ /** Activate the volume exclusively on this host (-aey) */
+ ACTIVATE_EXCLUSIVE("-aey", "activate exclusively"),
+
+ /** Activate the volume in shared mode on this host (-asy) */
+ ACTIVATE_SHARED("-asy", "activate in shared mode");
+
+ private final String lvchangeFlag;
+ private final String description;
+
+ Operation(String lvchangeFlag, String description) {
+ this.lvchangeFlag = lvchangeFlag;
+ this.description = description;
+ }
+
+ public String getLvchangeFlag() {
+ return lvchangeFlag;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+ }
+
+ private String lvPath;
+ private Operation operation;
+ private String volumeUuid;
+
+ public ClvmLockTransferCommand() {
+ // For serialization
+ }
+
+ public ClvmLockTransferCommand(Operation operation, String lvPath, String volumeUuid) {
+ this.operation = operation;
+ this.lvPath = lvPath;
+ this.volumeUuid = volumeUuid;
+ // Allow up to 30 seconds for the LVM lock operation to complete
+ setWait(30);
+ }
+
+ public String getLvPath() {
+ return lvPath;
+ }
+
+ public Operation getOperation() {
+ return operation;
+ }
+
+ public String getVolumeUuid() {
+ return volumeUuid;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+ return true;
+ }
+}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java
index 8b0171870765..74d18fe26940 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java
@@ -31,6 +31,18 @@
public interface VolumeInfo extends DownloadableDataInfo, Volume {
+ /**
+ * Constant for the volume detail key that stores the destination host ID for CLVM volume creation routing.
+ * This helps ensure volumes are created on the correct host with exclusive locks.
+ */
+ String DESTINATION_HOST_ID = "destinationHostId";
+
+ /**
+ * Constant for the volume detail key that stores the host ID currently holding the CLVM exclusive lock.
+ * This is used during lightweight lock migration to determine the source host for lock transfer.
+ */
+ String CLVM_LOCK_HOST_ID = "clvmLockHostId";
+
boolean isAttachedVM();
void addPayload(Object data);
@@ -103,4 +115,21 @@ public interface VolumeInfo extends DownloadableDataInfo, Volume {
List<String> getCheckpointPaths();
Set<String> getCheckpointImageStoreUrls();
+
+ /**
+ * Gets the destination host ID hint for CLVM volume creation.
+ * This is used to route volume creation commands to the specific host where the VM will be deployed.
+ * Only applicable for CLVM storage pools to avoid shared mode activation.
+ *
+ * @return The host ID where the volume should be created, or null if not set
+ */
+ Long getDestinationHostId();
+
+ /**
+ * Sets the destination host ID hint for CLVM volume creation.
+ * This should be set before volume creation when the destination host is known.
+ *
+ * @param hostId The host ID where the volume should be created
+ */
+ void setDestinationHostId(Long hostId);
}
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index e8796fb02529..78f62f1ebd6c 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -50,6 +50,7 @@
import javax.persistence.EntityExistsException;
+import com.cloud.agent.api.PostMigrationCommand;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@@ -3238,6 +3239,22 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e);
}
migrated = true;
+ try {
+ logger.info("Executing post-migration tasks for VM {} on destination host {}", vm.getInstanceName(), dstHostId);
+ final PostMigrationCommand postMigrationCommand = new PostMigrationCommand(to, vm.getInstanceName());
+ final Answer postMigrationAnswer = _agentMgr.send(dstHostId, postMigrationCommand);
+
+ if (postMigrationAnswer == null || !postMigrationAnswer.getResult()) {
+ final String details = postMigrationAnswer != null ? postMigrationAnswer.getDetails() : "null answer returned";
+ logger.warn("Post-migration tasks failed for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
+ vm.getInstanceName(), dstHostId, details);
+ } else {
+ logger.info("Successfully completed post-migration tasks for VM {} on destination host {}", vm.getInstanceName(), dstHostId);
+ }
+ } catch (Exception e) {
+ logger.warn("Exception during post-migration tasks for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
+ vm.getInstanceName(), dstHostId, e.getMessage(), e);
+ }
} finally {
if (!migrated) {
logger.info("Migration was unsuccessful. Cleaning up: {}", vm);
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index e8c75afa81c5..21ce76895925 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -745,6 +745,15 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual
logger.debug("Trying to create volume [{}] on storage pool [{}].",
volumeToString, poolToString);
DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
+
+ // For CLVM pools, set the destination host hint so volume is created on the correct host
+ // This avoids the need for shared mode activation and improves performance
+ if (pool.getPoolType() == Storage.StoragePoolType.CLVM && hostId != null) {
+ logger.info("CLVM pool detected. Setting destination host {} for volume {} to route creation to correct host",
+ hostId, volumeInfo.getUuid());
+ volumeInfo.setDestinationHostId(hostId);
+ }
+
for (int i = 0; i < 2; i++) {
// retry one more time in case of template reload is required for Vmware case
AsyncCallFuture<VolumeApiResult> future = null;
@@ -1851,6 +1860,20 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
} else {
+ // For CLVM pools, set the destination host hint so volume is created on the correct host
+ // This avoids the need for shared mode activation and improves performance
+ StoragePoolVO poolVO = _storagePoolDao.findById(destPool.getId());
+ if (poolVO != null && poolVO.getPoolType() == Storage.StoragePoolType.CLVM) {
+ Long hostId = vm.getVirtualMachine().getHostId();
+ if (hostId != null) {
+ // Store in both memory and database so it persists across VolumeInfo recreations
+ volume.setDestinationHostId(hostId);
+ _volDetailDao.addDetail(volume.getId(), VolumeInfo.DESTINATION_HOST_ID, String.valueOf(hostId), false);
+ logger.info("CLVM pool detected during volume creation from template. Setting destination host {} for volume {} (persisted to DB) to route creation to correct host",
+ hostId, volume.getUuid());
+ }
+ }
+
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
}
}
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java
index cdf903407c17..86f492e81d27 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java
@@ -148,7 +148,7 @@ public boolean configure(String name, Map<String, Object> params) throws Configu
idStateNeqSearch = createSearchBuilder();
idStateNeqSearch.and(SNAPSHOT_ID, idStateNeqSearch.entity().getSnapshotId(), SearchCriteria.Op.EQ);
- idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NEQ);
+ idStateNeqSearch.and(STATE, idStateNeqSearch.entity().getState(), SearchCriteria.Op.NIN);
idStateNeqSearch.done();
snapshotVOSearch = snapshotDao.createSearchBuilder();
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
index b71d6cf3afac..665c3a4659ca 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
@@ -27,6 +27,7 @@
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Snapshot;
+import com.cloud.storage.Storage;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
@@ -468,6 +469,13 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) {
@Override
public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
+ UserVmVO vm = userVmDao.findById(vmSnapshot.getVmId());
+ String cantHandleLog = String.format("Default VM snapshot cannot handle VM snapshot for [%s]", vm);
+
+ if (isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
+ return StrategyPriority.CANT_HANDLE;
+ }
+
return StrategyPriority.DEFAULT;
}
@@ -493,10 +501,31 @@ public boolean deleteVMSnapshotFromDB(VMSnapshot vmSnapshot, boolean unmanage) {
return vmSnapshotDao.remove(vmSnapshot.getId());
}
+ protected boolean isRunningVMVolumeOnCLVMStorage(UserVmVO vm, String cantHandleLog) {
+ Long vmId = vm.getId();
+ if (State.Running.equals(vm.getState())) {
+ List<VolumeVO> volumes = volumeDao.findByInstance(vmId);
+ for (VolumeVO volume : volumes) {
+ StoragePool pool = primaryDataStoreDao.findById(volume.getPoolId());
+ if (pool != null && pool.getPoolType() == Storage.StoragePoolType.CLVM) {
+ logger.warn("Rejecting VM snapshot request: {} - VM is running on CLVM storage (pool: {}, poolType: CLVM)",
+ cantHandleLog, pool.getName());
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
@Override
public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) {
UserVmVO vm = userVmDao.findById(vmId);
String cantHandleLog = String.format("Default VM snapshot cannot handle VM snapshot for [%s]", vm);
+
+ if (isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
+ return StrategyPriority.CANT_HANDLE;
+ }
+
if (State.Running.equals(vm.getState()) && !snapshotMemory) {
logger.debug("{} as it is running and its memory will not be affected.", cantHandleLog, vm);
return StrategyPriority.CANT_HANDLE;
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java
index e3f28a7012c2..7a9cb4601543 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java
@@ -345,6 +345,13 @@ public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
}
}
+ Long vmId = vmSnapshot.getVmId();
+ UserVmVO vm = userVmDao.findById(vmId);
+ String cantHandleLog = String.format("Storage VM snapshot strategy cannot handle VM snapshot for [%s]", vm);
+ if (vm != null && isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
+ return StrategyPriority.CANT_HANDLE;
+ }
+
if ( SnapshotManager.VmStorageSnapshotKvm.value() && userVm.getHypervisorType() == Hypervisor.HypervisorType.KVM
&& vmSnapshot.getType() == VMSnapshot.Type.Disk) {
return StrategyPriority.HYPERVISOR;
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
index 061d18dc3769..1cac9701f4d1 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
@@ -32,8 +32,10 @@
import com.cloud.dc.DedicatedResourceVO;
import com.cloud.dc.dao.DedicatedResourceDao;
+import com.cloud.storage.Storage;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
+import com.cloud.utils.db.QueryBuilder;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@@ -46,6 +48,7 @@
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.LocalHostEndpoint;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.stereotype.Component;
@@ -59,8 +62,8 @@
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.TemplateType;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import com.cloud.utils.db.DB;
-import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -75,6 +78,8 @@ public class DefaultEndPointSelector implements EndPointSelector {
private HostDao hostDao;
@Inject
private DedicatedResourceDao dedicatedResourceDao;
+ @Inject
+ private PrimaryDataStoreDao _storagePoolDao;
private static final String VOL_ENCRYPT_COLUMN_NAME = "volume_encryption_support";
private final String findOneHostOnPrimaryStorage = "select t.id from "
@@ -264,6 +269,16 @@ public EndPoint select(DataObject srcData, DataObject destData) {
@Override
public EndPoint select(DataObject srcData, DataObject destData, boolean volumeEncryptionSupportRequired) {
+ // FOR CLVM: Check if destination is a volume with destinationHostId hint
+ // This ensures template-to-volume copy is routed to the correct host for optimal lock placement
+ if (destData instanceof VolumeInfo) {
+ EndPoint clvmEndpoint = selectClvmEndpointIfApplicable((VolumeInfo) destData, "template-to-volume copy");
+ if (clvmEndpoint != null) {
+ return clvmEndpoint;
+ }
+ }
+
+ // Default behavior for non-CLVM or when no destination host is set
DataStore srcStore = srcData.getDataStore();
DataStore destStore = destData.getDataStore();
if (moveBetweenPrimaryImage(srcStore, destStore)) {
@@ -388,9 +403,59 @@ private List<HostVO> listUpAndConnectingSecondaryStorageVmHost(Long dcId) {
return sc.list();
}
+ /**
+ * Selects endpoint for CLVM volumes with destination host hint.
+ * This ensures volumes are created on the correct host with exclusive locks.
+ *
+ * @param volume The volume to check for CLVM routing
+ * @param operation Description of the operation (for logging)
+ * @return EndPoint for the destination host if CLVM routing applies, null otherwise
+ */
+ private EndPoint selectClvmEndpointIfApplicable(VolumeInfo volume, String operation) {
+ DataStore store = volume.getDataStore();
+
+ if (store.getRole() != DataStoreRole.Primary) {
+ return null;
+ }
+
+ // Check if this is a CLVM pool
+ StoragePoolVO pool = _storagePoolDao.findById(store.getId());
+ if (pool == null || pool.getPoolType() != Storage.StoragePoolType.CLVM) {
+ return null;
+ }
+
+ // Check if destination host hint is set
+ Long destHostId = volume.getDestinationHostId();
+ if (destHostId == null) {
+ return null;
+ }
+
+ logger.info("CLVM {}: routing volume {} to destination host {} for optimal exclusive lock placement",
+ operation, volume.getUuid(), destHostId);
+
+ EndPoint ep = getEndPointFromHostId(destHostId);
+ if (ep != null) {
+ return ep;
+ }
+
+ logger.warn("Could not get endpoint for destination host {}, falling back to default selection", destHostId);
+ return null;
+ }
+
@Override
public EndPoint select(DataObject object, boolean encryptionSupportRequired) {
DataStore store = object.getDataStore();
+
+ // For CLVM volumes with destination host hint, route to that specific host
+ // This ensures volumes are created on the correct host with exclusive locks
+ if (object instanceof VolumeInfo && store.getRole() == DataStoreRole.Primary) {
+ EndPoint clvmEndpoint = selectClvmEndpointIfApplicable((VolumeInfo) object, "volume creation");
+ if (clvmEndpoint != null) {
+ return clvmEndpoint;
+ }
+ }
+
+ // Default behavior for non-CLVM or when no destination host is set
if (store.getRole() == DataStoreRole.Primary) {
return findEndPointInScope(store.getScope(), findOneHostOnPrimaryStorage, store.getId(), encryptionSupportRequired);
}
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
index 43218b3f6a02..678ba12ef914 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -126,6 +126,7 @@ public class VolumeObject implements VolumeInfo {
private boolean directDownload;
private String vSphereStoragePolicyId;
private boolean followRedirects;
+ private Long destinationHostId; // For CLVM: hints where volume should be created
private List<String> checkpointPaths;
private Set<String> checkpointImageStoreUrls;
@@ -361,6 +362,27 @@ public void setDirectDownload(boolean directDownload) {
this.directDownload = directDownload;
}
+ @Override
+ public Long getDestinationHostId() {
+ // If not in memory, try to load from database (volume_details table)
+ if (destinationHostId == null && volumeVO != null) {
+ VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeVO.getId(), DESTINATION_HOST_ID);
+ if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
+ try {
+ destinationHostId = Long.parseLong(detail.getValue());
+ } catch (NumberFormatException e) {
+ logger.warn("Invalid destinationHostId value in volume_details for volume {}: {}", volumeVO.getUuid(), detail.getValue());
+ }
+ }
+ }
+ return destinationHostId;
+ }
+
+ @Override
+ public void setDestinationHostId(Long hostId) {
+ this.destinationHostId = hostId;
+ }
+
public void update() {
volumeDao.update(volumeVO.getId(), volumeVO);
volumeVO = volumeDao.findById(volumeVO.getId());
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 0a9e0d2d98e6..92b1c7b4b6a5 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -6523,4 +6523,198 @@ public String getHypervisorPath() {
public String getGuestCpuArch() {
return guestCpuArch;
}
+
+ /**
+ * CLVM volume state for migration operations on source host
+ */
+ public enum ClvmVolumeState {
+ /** Shared mode (-asy) - used before migration to allow both hosts to access volume */
+ SHARED("-asy", "shared", "Before migration: activating in shared mode"),
+
+ /** Deactivate (-an) - used after successful migration to release volume on source */
+ DEACTIVATE("-an", "deactivated", "After successful migration: deactivating volume"),
+
+ /** Exclusive mode (-aey) - used after failed migration to revert to original exclusive state */
+ EXCLUSIVE("-aey", "exclusive", "After failed migration: reverting to exclusive mode");
+
+ private final String lvchangeFlag;
+ private final String description;
+ private final String logMessage;
+
+ ClvmVolumeState(String lvchangeFlag, String description, String logMessage) {
+ this.lvchangeFlag = lvchangeFlag;
+ this.description = description;
+ this.logMessage = logMessage;
+ }
+
+ public String getLvchangeFlag() {
+ return lvchangeFlag;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+
+ public String getLogMessage() {
+ return logMessage;
+ }
+ }
+
+ public static void modifyClvmVolumesStateForMigration(List<DiskDef> disks, LibvirtComputingResource resource,
+ VirtualMachineTO vmSpec, ClvmVolumeState state) {
+ for (DiskDef disk : disks) {
+ if (isClvmVolume(disk, resource, vmSpec)) {
+ String volumePath = disk.getDiskPath();
+ try {
+ LOGGER.info("[CLVM Migration] {} for volume [{}]",
+ state.getLogMessage(), volumePath);
+
+ Script cmd = new Script("lvchange", Duration.standardSeconds(300), LOGGER);
+ cmd.add(state.getLvchangeFlag());
+ cmd.add(volumePath);
+
+ String result = cmd.execute();
+ if (result != null) {
+ LOGGER.error("[CLVM Migration] Failed to set volume [{}] to {} state. Command result: {}",
+ volumePath, state.getDescription(), result);
+ } else {
+ LOGGER.info("[CLVM Migration] Successfully set volume [{}] to {} state.",
+ volumePath, state.getDescription());
+ }
+ } catch (Exception e) {
+ LOGGER.error("[CLVM Migration] Exception while setting volume [{}] to {} state: {}",
+ volumePath, state.getDescription(), e.getMessage(), e);
+ }
+ }
+ }
+ }
+
+ /**
+ * Determines if a disk is on a CLVM storage pool by checking the actual pool type from VirtualMachineTO.
+ * This is the most reliable method as it uses CloudStack's own storage pool information.
+ *
+ * @param disk The disk definition to check
+ * @param resource The LibvirtComputingResource instance (unused but kept for compatibility)
+ * @param vmSpec The VirtualMachineTO specification containing disk and pool information
+ * @return true if the disk is on a CLVM storage pool, false otherwise
+ */
+ private static boolean isClvmVolume(DiskDef disk, LibvirtComputingResource resource, VirtualMachineTO vmSpec) {
+ String diskPath = disk.getDiskPath();
+ if (diskPath == null || vmSpec == null) {
+ return false;
+ }
+
+ try {
+ if (vmSpec.getDisks() != null) {
+ for (DiskTO diskTO : vmSpec.getDisks()) {
+ if (diskTO.getData() instanceof VolumeObjectTO) {
+ VolumeObjectTO volumeTO = (VolumeObjectTO) diskTO.getData();
+ if (diskPath.equals(volumeTO.getPath()) || diskPath.equals(diskTO.getPath())) {
+ DataStoreTO dataStore = volumeTO.getDataStore();
+ if (dataStore instanceof PrimaryDataStoreTO) {
+ PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) dataStore;
+ boolean isClvm = StoragePoolType.CLVM == primaryStore.getPoolType();
+ LOGGER.debug("Disk {} identified as CLVM={} via VirtualMachineTO pool type: {}",
+ diskPath, isClvm, primaryStore.getPoolType());
+ return isClvm;
+ }
+ }
+ }
+ }
+ }
+
+ // Fallback: Check VG attributes using vgs command (reliable)
+ // CLVM VGs have the 'c' (clustered) or 's' (shared) flag in their attributes
+ // Example: 'wz--ns' = shared, 'wz--n-' = not clustered
+ if (diskPath.startsWith("/dev/") && !diskPath.contains("/dev/mapper/")) {
+ String vgName = extractVolumeGroupFromPath(diskPath);
+ if (vgName != null) {
+ boolean isClustered = checkIfVolumeGroupIsClustered(vgName);
+ LOGGER.debug("Disk {} VG {} identified as clustered={} via vgs attribute check",
+ diskPath, vgName, isClustered);
+ return isClustered;
+ }
+ }
+
+ } catch (Exception e) {
+ LOGGER.error("Error determining if volume {} is CLVM: {}", diskPath, e.getMessage(), e);
+ }
+
+ return false;
+ }
+
+ /**
+ * Extracts the volume group name from a device path.
+ *
+ * @param devicePath The device path (e.g., /dev/vgname/lvname)
+ * @return The volume group name, or null if cannot be determined
+ */
+ static String extractVolumeGroupFromPath(String devicePath) {
+ if (devicePath == null || !devicePath.startsWith("/dev/")) {
+ return null;
+ }
+
+ // Format: /dev//
+ String[] parts = devicePath.split("/");
+ if (parts.length >= 3) {
+ return parts[2]; // ["", "dev", "vgname", ...]
+ }
+
+ return null;
+ }
+
+ /**
+ * Checks if a volume group is clustered (CLVM) by examining its attributes.
+ * Uses 'vgs' command to check for the clustered/shared flag in VG attributes.
+ *
+ * VG Attr format (6 characters): wz--nc or wz--ns
+ * Position 6: Clustered flag - 'c' = CLVM (clustered), 's' = shared (lvmlockd), '-' = not clustered
+ *
+ * @param vgName The volume group name
+ * @return true if the VG is clustered or shared, false otherwise
+ */
+ static boolean checkIfVolumeGroupIsClustered(String vgName) {
+ if (vgName == null) {
+ return false;
+ }
+
+ try {
+ // Use vgs with --noheadings and -o attr to get VG attributes
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ Script vgsCmd = new Script("vgs", 5000, LOGGER);
+ vgsCmd.add("--noheadings");
+ vgsCmd.add("--unbuffered");
+ vgsCmd.add("-o");
+ vgsCmd.add("vg_attr");
+ vgsCmd.add(vgName);
+
+ String result = vgsCmd.execute(parser);
+
+ if (result == null && parser.getLines() != null) {
+ String output = parser.getLines();
+ if (output != null && !output.isEmpty()) {
+ // Parse VG attributes (format: wz--nc or wz--ns or wz--n-)
+ // Position 6 (0-indexed 5) indicates clustering/sharing:
+ // 'c' = clustered (CLVM) or 's' = shared (lvmlockd) or '-' = not clustered/shared
+ String vgAttr = output.trim();
+ if (vgAttr.length() >= 6) {
+ char clusterFlag = vgAttr.charAt(5); // Position 6 (0-indexed 5)
+ boolean isClustered = (clusterFlag == 'c' || clusterFlag == 's');
+ LOGGER.debug("VG {} has attributes '{}', cluster/shared flag '{}' = {}",
+ vgName, vgAttr, clusterFlag, isClustered);
+ return isClustered;
+ } else {
+ LOGGER.warn("VG {} attributes '{}' have unexpected format (expected 6+ chars)", vgName, vgAttr);
+ }
+ }
+ } else {
+ LOGGER.warn("Failed to get VG attributes for {}: {}", vgName, result);
+ }
+
+ } catch (Exception e) {
+ LOGGER.debug("Error checking if VG {} is clustered: {}", vgName, e.getMessage());
+ }
+
+ return false;
+ }
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtClvmLockTransferCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtClvmLockTransferCommandWrapper.java
new file mode 100644
index 000000000000..1ef8ce59b592
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtClvmLockTransferCommandWrapper.java
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+
+import com.cloud.agent.api.Answer;
+import org.apache.cloudstack.storage.command.ClvmLockTransferCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.script.Script;
+
+@ResourceWrapper(handles = ClvmLockTransferCommand.class)
+public class LibvirtClvmLockTransferCommandWrapper
+ extends CommandWrapper {
+
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Override
+ public Answer execute(ClvmLockTransferCommand cmd, LibvirtComputingResource serverResource) {
+ String lvPath = cmd.getLvPath();
+ ClvmLockTransferCommand.Operation operation = cmd.getOperation();
+ String volumeUuid = cmd.getVolumeUuid();
+
+ logger.info(String.format("Executing CLVM lock transfer: operation=%s, lv=%s, volume=%s",
+ operation, lvPath, volumeUuid));
+
+ try {
+ String lvchangeOpt;
+ String operationDesc;
+ switch (operation) {
+ case DEACTIVATE:
+ lvchangeOpt = "-an";
+ operationDesc = "deactivated";
+ break;
+ case ACTIVATE_EXCLUSIVE:
+ lvchangeOpt = "-aey";
+ operationDesc = "activated exclusively";
+ break;
+ case ACTIVATE_SHARED:
+ lvchangeOpt = "-asy";
+ operationDesc = "activated in shared mode";
+ break;
+ default:
+ return new Answer(cmd, false, "Unknown operation: " + operation);
+ }
+
+ Script script = new Script("/usr/sbin/lvchange", 30000, logger);
+ script.add(lvchangeOpt);
+ script.add(lvPath);
+
+ String result = script.execute();
+
+ if (result != null) {
+ logger.error("CLVM lock transfer failed for volume {}: {}",
+ volumeUuid, result);
+ return new Answer(cmd, false,
+ String.format("lvchange %s %s failed: %s", lvchangeOpt, lvPath, result));
+ }
+
+ logger.info("Successfully executed CLVM lock transfer: {} {} for volume {}",
+ lvchangeOpt, lvPath, volumeUuid);
+
+ return new Answer(cmd, true,
+ String.format("Successfully %s CLVM volume %s", operationDesc, volumeUuid));
+
+ } catch (Exception e) {
+ logger.error("Exception during CLVM lock transfer for volume {}: {}",
+ volumeUuid, e.getMessage(), e);
+ return new Answer(cmd, false, "Exception: " + e.getMessage());
+ }
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index 43607edc53a5..9540b2265f46 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@ -1,5 +1,5 @@
 //
 // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
@@ -42,9 +40,15 @@
import javax.xml.transform.TransformerException;
import com.cloud.agent.api.VgpuTypesInfo;
+import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.GPUDeviceTO;
import com.cloud.hypervisor.kvm.resource.LibvirtGpuDef;
import com.cloud.hypervisor.kvm.resource.LibvirtXMLParser;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.collections4.CollectionUtils;
@@ -69,7 +73,6 @@
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
-import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.DpdkTO;
import com.cloud.agent.api.to.VirtualMachineTO;
@@ -82,11 +85,6 @@
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
import com.cloud.hypervisor.kvm.resource.MigrateKVMAsync;
import com.cloud.hypervisor.kvm.resource.VifDriver;
-import com.cloud.resource.CommandWrapper;
-import com.cloud.resource.ResourceWrapper;
-import com.cloud.utils.Ternary;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.VirtualMachine;
@ResourceWrapper(handles = MigrateCommand.class)
public final class LibvirtMigrateCommandWrapper extends CommandWrapper {
@@ -117,7 +115,8 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour
Command.State commandState = null;
List ifaces = null;
- List disks;
+ List disks = new ArrayList<>();
+ VirtualMachineTO to = null;
Domain dm = null;
Connect dconn = null;
@@ -136,7 +135,7 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour
if (logger.isDebugEnabled()) {
logger.debug(String.format("Found domain with name [%s]. Starting VM migration to host [%s].", vmName, destinationUri));
}
- VirtualMachineTO to = command.getVirtualMachine();
+ to = command.getVirtualMachine();
dm = conn.domainLookupByName(vmName);
/*
@@ -239,6 +238,9 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
libvirtComputingResource.detachAndAttachConfigDriveISO(conn, vmName);
}
+ // Activate CLVM volumes in shared mode before migration starts
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.SHARED);
+
//run migration in thread so we can monitor it
logger.info("Starting live migration of instance {} to destination host {} having the final XML configuration: {}.", vmName, dconn.getURI(), maskSensitiveInfoInXML(xmlDesc));
final ExecutorService executor = Executors.newFixedThreadPool(1);
@@ -336,6 +338,12 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
logger.debug(String.format("Cleaning the disks of VM [%s] in the source pool after VM migration finished.", vmName));
}
resumeDomainIfPaused(destDomain, vmName);
+
+ // Deactivate CLVM volumes on source host after successful migration
+ if (to != null) {
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.DEACTIVATE);
+ }
+
deleteOrDisconnectDisksOnSourcePool(libvirtComputingResource, migrateDiskInfoList, disks);
libvirtComputingResource.cleanOldSecretsByDiskDef(conn, disks);
}
@@ -382,6 +390,10 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
if (destDomain != null) {
destDomain.free();
}
+ // Revert CLVM volumes to exclusive mode on failure
+ if (to != null) {
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.EXCLUSIVE);
+ }
} catch (final LibvirtException e) {
logger.trace("Ignoring libvirt error.", e);
}
@@ -681,7 +693,7 @@ protected String replaceDpdkInterfaces(String xmlDesc, Map dpdkP
protected void deleteOrDisconnectDisksOnSourcePool(final LibvirtComputingResource libvirtComputingResource, final List migrateDiskInfoList,
List disks) {
for (DiskDef disk : disks) {
- MigrateDiskInfo migrateDiskInfo = searchDiskDefOnMigrateDiskInfoList(migrateDiskInfoList, disk);
+ MigrateCommand.MigrateDiskInfo migrateDiskInfo = searchDiskDefOnMigrateDiskInfoList(migrateDiskInfoList, disk);
if (migrateDiskInfo != null && migrateDiskInfo.isSourceDiskOnStorageFileSystem()) {
deleteLocalVolume(disk.getDiskPath());
} else {
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostMigrationCommandWrapper.java
new file mode 100644
index 000000000000..2492dce47e56
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostMigrationCommandWrapper.java
@@ -0,0 +1,83 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import java.util.List;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import org.libvirt.Connect;
+import org.libvirt.LibvirtException;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.PostMigrationAnswer;
+import com.cloud.agent.api.PostMigrationCommand;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
+import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+
+/**
+ * Wrapper for PostMigrationCommand on KVM hypervisor.
+ * Handles post-migration tasks on the destination host after a VM has been successfully migrated.
+ * Primary responsibility: Convert CLVM volumes from shared mode to exclusive mode on destination.
+ */
+@ResourceWrapper(handles = PostMigrationCommand.class)
+public final class LibvirtPostMigrationCommandWrapper extends CommandWrapper {
+
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Override
+ public Answer execute(final PostMigrationCommand command, final LibvirtComputingResource libvirtComputingResource) {
+ final VirtualMachineTO vm = command.getVirtualMachine();
+ final String vmName = command.getVmName();
+
+ if (vm == null || vmName == null) {
+ return new PostMigrationAnswer(command, "VM or VM name is null");
+ }
+
+ logger.debug("Executing post-migration tasks for VM {} on destination host", vmName);
+
+ try {
+ final Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
+
+ List disks = libvirtComputingResource.getDisks(conn, vmName);
+ logger.debug("[CLVM Post-Migration] Processing volumes for VM {} to claim exclusive locks on any CLVM volumes", vmName);
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(
+ disks,
+ libvirtComputingResource,
+ vm,
+ LibvirtComputingResource.ClvmVolumeState.EXCLUSIVE
+ );
+
+ logger.debug("Successfully completed post-migration tasks for VM {}", vmName);
+ return new PostMigrationAnswer(command);
+
+ } catch (final LibvirtException e) {
+ logger.error("LibVirt error during post-migration for VM {}: {}", vmName, e.getMessage(), e);
+ return new PostMigrationAnswer(command, e);
+ } catch (final Exception e) {
+ logger.error("Error during post-migration for VM {}: {}", vmName, e.getMessage(), e);
+ return new PostMigrationAnswer(command, e);
+ }
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java
index d9323df4477d..03840cd7fdfa 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java
@@ -21,6 +21,7 @@
import java.net.URISyntaxException;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import org.apache.cloudstack.storage.configdrive.ConfigDrive;
@@ -124,6 +125,20 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom
return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host");
}
+ // Activate CLVM volumes in shared mode on destination host for live migration
+ try {
+ List disks = libvirtComputingResource.getDisks(conn, vm.getName());
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(
+ disks,
+ libvirtComputingResource,
+ vm,
+ LibvirtComputingResource.ClvmVolumeState.SHARED
+ );
+ } catch (Exception e) {
+ logger.warn("Failed to activate CLVM volumes in shared mode on destination for VM {}: {}",
+ vm.getName(), e.getMessage(), e);
+ }
+
logger.info("Successfully prepared destination host for migration of VM {}", vm.getName());
return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm);
} catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) {
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 030d9747d6cd..3fca370fd7f7 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -1115,7 +1115,14 @@ public Answer backupSnapshot(final CopyCommand cmd) {
}
} else {
final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), logger);
- command.add("-b", isCreatedFromVmSnapshot ? snapshotDisk.getPath() : snapshot.getPath());
+ String backupPath;
+ if (primaryPool.getType() == StoragePoolType.CLVM) {
+ backupPath = snapshotDisk.getPath();
+ logger.debug("Using snapshotDisk path for CLVM backup: " + backupPath);
+ } else {
+ backupPath = isCreatedFromVmSnapshot ? snapshotDisk.getPath() : snapshot.getPath();
+ }
+ command.add("-b", backupPath);
command.add(NAME_OPTION, snapshotName);
command.add("-p", snapshotDestPath);
@@ -1160,6 +1167,76 @@ public Answer backupSnapshot(final CopyCommand cmd) {
}
}
+ /**
+ * Delete a CLVM snapshot using comprehensive cleanup.
+ * For CLVM, the snapshot path stored in DB is: /dev/vgname/volumeuuid/snapshotuuid
+ * the actual snapshot LV created by CLVM is: /dev/vgname/md5(snapshotuuid)
+ *
+ * @param snapshotPath The snapshot path from database
+ * @param checkExistence If true, checks if snapshot exists before cleanup (for explicit deletion)
+ * If false, always performs cleanup (for post-backup cleanup)
+ * @return true if cleanup was performed, false if snapshot didn't exist (when checkExistence=true)
+ */
+ private boolean deleteClvmSnapshot(String snapshotPath, boolean checkExistence) {
+ logger.info("Starting CLVM snapshot deletion for path: {}, checkExistence: {}", snapshotPath, checkExistence);
+
+ try {
+ // Parse the snapshot path: /dev/acsvg/volume-uuid/snapshot-uuid
+ String[] pathParts = snapshotPath.split("/");
+ if (pathParts.length < 5) {
+ logger.warn("Invalid CLVM snapshot path format: {}, expected format: /dev/vgname/volume-uuid/snapshot-uuid", snapshotPath);
+ return false;
+ }
+
+ String vgName = pathParts[2];
+ String volumeUuid = pathParts[3];
+ String snapshotUuid = pathParts[4];
+
+ logger.info("Parsed snapshot path - VG: {}, Volume: {}, Snapshot: {}", vgName, volumeUuid, snapshotUuid);
+
+ // Compute MD5 hash of snapshot UUID (same as managesnapshot.sh does)
+ String md5Hash = computeMd5Hash(snapshotUuid);
+ logger.debug("Computed MD5 hash for snapshot UUID {}: {}", snapshotUuid, md5Hash);
+ String snapshotLvPath = vgName + "/" + md5Hash;
+ String actualSnapshotPath = "/dev/" + snapshotLvPath;
+
+ // Check if snapshot exists (if requested)
+ if (checkExistence) {
+ Script checkSnapshot = new Script("/usr/sbin/lvs", 5000, logger);
+ checkSnapshot.add("--noheadings");
+ checkSnapshot.add(snapshotLvPath);
+ String checkResult = checkSnapshot.execute();
+
+ if (checkResult != null) {
+ // Snapshot doesn't exist - was already cleaned up
+ logger.info("CLVM snapshot {} was already deleted, no cleanup needed", md5Hash);
+ return false;
+ }
+ logger.info("CLVM snapshot still exists for {}, performing cleanup", md5Hash);
+ }
+
+ // Use native LVM command to remove snapshot (handles all cleanup automatically)
+ Script removeSnapshot = new Script("lvremove", 10000, logger);
+ removeSnapshot.add("-f");
+ removeSnapshot.add(snapshotLvPath);
+
+ logger.info("Executing: lvremove -f {}", snapshotLvPath);
+ String removeResult = removeSnapshot.execute();
+
+ if (removeResult == null) {
+ logger.info("Successfully deleted CLVM snapshot: {} (actual path: {})", snapshotPath, actualSnapshotPath);
+ return true;
+ } else {
+ logger.warn("Failed to delete CLVM snapshot {}: {}", snapshotPath, removeResult);
+ return false;
+ }
+
+ } catch (Exception ex) {
+ logger.error("Exception while deleting CLVM snapshot {}", snapshotPath, ex);
+ return false;
+ }
+ }
+
private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObjectTO snapshot,
KVMStoragePool primaryPool) {
String snapshotPath = snapshot.getPath();
@@ -1172,7 +1249,14 @@ private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObject
if ((backupSnapshotAfterTakingSnapshot == null || BooleanUtils.toBoolean(backupSnapshotAfterTakingSnapshot)) && deleteSnapshotOnPrimary) {
try {
- Files.deleteIfExists(Paths.get(snapshotPath));
+ if (primaryPool.getType() == StoragePoolType.CLVM) {
+ boolean cleanedUp = deleteClvmSnapshot(snapshotPath, false);
+ if (!cleanedUp) {
+ logger.info("CLVM snapshot on primary was already removed or could not be cleaned up: {}", snapshotPath);
+ }
+ } else {
+ Files.deleteIfExists(Paths.get(snapshotPath));
+ }
} catch (IOException ex) {
logger.error("Failed to delete snapshot [{}] on primary storage [{}].", snapshot.getId(), snapshot.getName(), ex);
}
@@ -1181,6 +1265,42 @@ private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObject
}
}
+
+ /**
+ * Compute MD5 hash of a string, matching what managesnapshot.sh does:
+ * echo "${snapshot}" | md5sum -t | awk '{ print $1 }'
+ */
+ private String computeMd5Hash(String input) {
+ try {
+ java.security.MessageDigest md = java.security.MessageDigest.getInstance("MD5");
+ byte[] array = md.digest((input + "\n").getBytes("UTF-8"));
+ StringBuilder sb = new StringBuilder();
+ for (byte b : array) {
+ sb.append(String.format("%02x", b));
+ }
+ return sb.toString();
+ } catch (Exception e) {
+ logger.error("Failed to compute MD5 hash for: {}", input, e);
+ return input;
+ }
+ }
+
+ /**
+ * Extract VG name from CLVM disk path.
+ * Path format: /dev/vgname/volumeuuid
+ * Returns: vgname
+ */
+ private String extractVgNameFromDiskPath(String diskPath) {
+ if (diskPath == null || diskPath.isEmpty()) {
+ return null;
+ }
+ String[] pathParts = diskPath.split("/");
+ if (pathParts.length >= 3) {
+ return pathParts[2]; // /dev/vgname/volumeuuid -> vgname
+ }
+ return null;
+ }
+
protected synchronized void attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach, Map params, DataStoreTO store) throws
LibvirtException, InternalErrorException {
DiskDef iso = new DiskDef();
@@ -1842,8 +1962,10 @@ public Answer createSnapshot(final CreateObjectCommand cmd) {
}
}
- if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state) && volume.requiresEncryption()) {
- throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
+ if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state)) {
+ if (volume.requiresEncryption()) {
+ throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
+ }
}
KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
@@ -2880,6 +3002,25 @@ public Answer deleteSnapshot(final DeleteCommand cmd) {
if (snapshotTO.isKvmIncrementalSnapshot()) {
deleteCheckpoint(snapshotTO);
}
+ } else if (primaryPool.getType() == StoragePoolType.CLVM) {
+ // For CLVM, snapshots are typically already deleted from primary storage during backup
+ // via deleteSnapshotOnPrimary in the backupSnapshot finally block.
+ // This is called when the user explicitly deletes the snapshot via UI/API.
+ // We check if the snapshot still exists and clean it up if needed.
+ logger.info("Processing CLVM snapshot deletion (id={}, name={}, path={}) on primary storage",
+ snapshotTO.getId(), snapshotTO.getName(), snapshotTO.getPath());
+
+ String snapshotPath = snapshotTO.getPath();
+ if (snapshotPath != null && !snapshotPath.isEmpty()) {
+ boolean wasDeleted = deleteClvmSnapshot(snapshotPath, true);
+ if (wasDeleted) {
+ logger.info("Successfully cleaned up CLVM snapshot {} from primary storage", snapshotName);
+ } else {
+ logger.info("CLVM snapshot {} was already deleted from primary storage during backup, no cleanup needed", snapshotName);
+ }
+ } else {
+ logger.debug("CLVM snapshot path is null or empty, assuming already cleaned up");
+ }
} else {
logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
index a03daeb197bf..00f1cb9fa814 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
@@ -34,6 +34,7 @@
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
+import com.cloud.utils.script.OutputInterpreter;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
@@ -254,9 +255,12 @@ public StorageVol getVolume(StoragePool pool, String volName) {
try {
vol = pool.storageVolLookupByName(volName);
- logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool");
+ if (vol != null) {
+ logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool");
+ }
} catch (LibvirtException e) {
- throw new CloudRuntimeException("Could not find volume " + volName + ": " + e.getMessage());
+ logger.debug("Volume " + volName + " still not found after pool refresh: " + e.getMessage());
+ return null;
}
}
@@ -663,6 +667,17 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
try {
StorageVol vol = getVolume(libvirtPool.getPool(), volumeUuid);
+
+ // Check if volume was found - if null, treat as not found and trigger fallback for CLVM
+ if (vol == null) {
+ logger.debug("Volume " + volumeUuid + " not found in libvirt, will check for CLVM fallback");
+ if (pool.getType() == StoragePoolType.CLVM) {
+ return getPhysicalDisk(volumeUuid, pool, libvirtPool);
+ }
+
+ throw new CloudRuntimeException("Volume " + volumeUuid + " not found in libvirt pool");
+ }
+
KVMPhysicalDisk disk;
LibvirtStorageVolumeDef voldef = getStorageVolumeDef(libvirtPool.getPool().getConnect(), vol);
disk = new KVMPhysicalDisk(vol.getPath(), vol.getName(), pool);
@@ -693,11 +708,156 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
}
return disk;
} catch (LibvirtException e) {
- logger.debug("Failed to get physical disk:", e);
+ logger.debug("Failed to get volume from libvirt: " + e.getMessage());
+ // For CLVM, try direct block device access as fallback
+ if (pool.getType() == StoragePoolType.CLVM) {
+ return getPhysicalDisk(volumeUuid, pool, libvirtPool);
+ }
+
throw new CloudRuntimeException(e.toString());
}
}
+ private KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool, LibvirtStoragePool libvirtPool) {
+ logger.info("CLVM volume not visible to libvirt, attempting direct block device access for volume: {}", volumeUuid);
+
+ try {
+ logger.debug("Refreshing libvirt storage pool: {}", pool.getUuid());
+ libvirtPool.getPool().refresh(0);
+
+ StorageVol vol = getVolume(libvirtPool.getPool(), volumeUuid);
+ if (vol != null) {
+ logger.info("Volume found after pool refresh: {}", volumeUuid);
+ KVMPhysicalDisk disk;
+ LibvirtStorageVolumeDef voldef = getStorageVolumeDef(libvirtPool.getPool().getConnect(), vol);
+ disk = new KVMPhysicalDisk(vol.getPath(), vol.getName(), pool);
+ disk.setSize(vol.getInfo().allocation);
+ disk.setVirtualSize(vol.getInfo().capacity);
+ disk.setFormat(voldef.getFormat() == LibvirtStorageVolumeDef.VolumeFormat.QCOW2 ?
+ PhysicalDiskFormat.QCOW2 : PhysicalDiskFormat.RAW);
+ return disk;
+ }
+ } catch (LibvirtException refreshEx) {
+ logger.debug("Pool refresh failed or volume still not found: {}", refreshEx.getMessage());
+ }
+
+ // Still not found after refresh, try direct block device access
+ return getPhysicalDiskViaDirectBlockDevice(volumeUuid, pool);
+ }
+
+ private String getVgName(KVMStoragePool pool, String sourceDir) {
+ String vgName = sourceDir;
+ if (vgName.startsWith("/")) {
+ String[] parts = vgName.split("/");
+ List tokens = Arrays.stream(parts)
+ .filter(s -> !s.isEmpty()).collect(Collectors.toList());
+
+ vgName = tokens.size() > 1 ? tokens.get(1)
+ : tokens.size() == 1 ? tokens.get(0)
+ : "";
+ }
+ return vgName;
+ }
+
+ /**
+ * For CLVM volumes that exist in LVM but are not visible to libvirt,
+ * access them directly via block device path.
+ */
+ private KVMPhysicalDisk getPhysicalDiskViaDirectBlockDevice(String volumeUuid, KVMStoragePool pool) {
+ try {
+ // For CLVM, pool sourceDir contains the VG path (e.g., "/dev/acsvg")
+ // Extract the VG name
+ String sourceDir = pool.getLocalPath();
+ if (sourceDir == null || sourceDir.isEmpty()) {
+ throw new CloudRuntimeException("CLVM pool sourceDir is not set, cannot determine VG name");
+ }
+ String vgName = getVgName(pool, sourceDir);
+ logger.debug("Using VG name: {} (from sourceDir: {}) ", vgName, sourceDir);
+
+ // Check if the LV exists in LVM using lvs command
+ logger.debug("Checking if volume {} exists in VG {}", volumeUuid, vgName);
+ Script checkLvCmd = new Script("/usr/sbin/lvs", 5000, logger);
+ checkLvCmd.add("--noheadings");
+ checkLvCmd.add("--unbuffered");
+ checkLvCmd.add(vgName + "/" + volumeUuid);
+
+ String checkResult = checkLvCmd.execute();
+ if (checkResult != null) {
+ logger.debug("Volume {} does not exist in VG {}: {}", volumeUuid, vgName, checkResult);
+ throw new CloudRuntimeException(String.format("Storage volume not found: no storage vol with matching name '%s'", volumeUuid));
+ }
+
+ logger.info("Volume {} exists in LVM but not visible to libvirt, accessing directly", volumeUuid);
+
+ // Try standard device path first
+ String lvPath = "/dev/" + vgName + "/" + volumeUuid;
+ File lvDevice = new File(lvPath);
+
+ if (!lvDevice.exists()) {
+ // Try device-mapper path with escaped hyphens
+ String vgNameEscaped = vgName.replace("-", "--");
+ String volumeUuidEscaped = volumeUuid.replace("-", "--");
+ lvPath = "/dev/mapper/" + vgNameEscaped + "-" + volumeUuidEscaped;
+ lvDevice = new File(lvPath);
+
+ if (!lvDevice.exists()) {
+ logger.warn("Volume exists in LVM but device node not found: {}", volumeUuid);
+ throw new CloudRuntimeException(String.format("Could not find volume %s " +
+ "in VG %s - volume exists in LVM but device node not accessible", volumeUuid, vgName));
+ }
+ }
+
+ long size = 0;
+ try {
+ Script lvsCmd = new Script("/usr/sbin/lvs", 5000, logger);
+ lvsCmd.add("--noheadings");
+ lvsCmd.add("--units");
+ lvsCmd.add("b");
+ lvsCmd.add("-o");
+ lvsCmd.add("lv_size");
+ lvsCmd.add(lvPath);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = lvsCmd.execute(parser);
+
+ String output = null;
+ if (result == null) {
+ output = parser.getLines();
+ } else {
+ output = result;
+ }
+
+ if (output != null && !output.isEmpty()) {
+ String sizeStr = output.trim().replaceAll("[^0-9]", "");
+ if (!sizeStr.isEmpty()) {
+ size = Long.parseLong(sizeStr);
+ }
+ }
+ } catch (Exception sizeEx) {
+ logger.warn("Failed to get size for CLVM volume via lvs: {}", sizeEx.getMessage());
+ if (lvDevice.isFile()) {
+ size = lvDevice.length();
+ }
+ }
+
+ KVMPhysicalDisk disk = new KVMPhysicalDisk(lvPath, volumeUuid, pool);
+ disk.setFormat(PhysicalDiskFormat.RAW);
+ disk.setSize(size);
+ disk.setVirtualSize(size);
+
+ logger.info("Successfully accessed CLVM volume via direct block device: {} " +
+ "with size: {} bytes", lvPath, size);
+
+ return disk;
+
+ } catch (CloudRuntimeException ex) {
+ throw ex;
+ } catch (Exception ex) {
+ logger.error("Failed to access CLVM volume via direct block device: {}", volumeUuid, ex);
+ throw new CloudRuntimeException(String.format("Could not find volume %s: %s", volumeUuid, ex.getMessage()));
+ }
+ }
+
/**
* adjust refcount
*/
@@ -1046,6 +1206,12 @@ private KVMPhysicalDisk createPhysicalDiskByLibVirt(String name, KVMStoragePool
volName = vol.getName();
volAllocation = vol.getInfo().allocation;
volCapacity = vol.getInfo().capacity;
+
+ // For CLVM volumes, activate in shared mode so all cluster hosts can access it
+ if (pool.getType() == StoragePoolType.CLVM) {
+ logger.info("Activating CLVM volume {} in shared mode for cluster-wide access", volPath);
+ activateClvmVolumeInSharedMode(volPath);
+ }
} catch (LibvirtException e) {
throw new CloudRuntimeException(e.toString());
}
@@ -1057,6 +1223,30 @@ private KVMPhysicalDisk createPhysicalDiskByLibVirt(String name, KVMStoragePool
return disk;
}
+ /**
+ * Activates a CLVM volume in shared mode so all hosts in the cluster can access it.
+ * This is necessary after volume creation since libvirt creates LVs with exclusive activation by default.
+ *
+ * @param volumePath The full path to the LV (e.g., /dev/vgname/volume-uuid)
+ */
+ private void activateClvmVolumeInSharedMode(String volumePath) {
+ try {
+ Script cmd = new Script("lvchange", 5000, logger);
+ cmd.add("-asy"); // Activate in shared mode
+ cmd.add(volumePath);
+
+ String result = cmd.execute();
+ if (result != null) {
+ logger.error("Failed to activate CLVM volume {} in shared mode. Result: {}", volumePath, result);
+ throw new CloudRuntimeException("Failed to activate CLVM volume in shared mode: " + result);
+ }
+ logger.info("Successfully activated CLVM volume {} in shared mode", volumePath);
+ } catch (Exception e) {
+ logger.error("Exception while activating CLVM volume {} in shared mode: {}", volumePath, e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to activate CLVM volume in shared mode: " + e.getMessage(), e);
+ }
+ }
+
private KVMPhysicalDisk createPhysicalDiskByQemuImg(String name, KVMStoragePool pool, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size,
byte[] passphrase) {
@@ -1116,6 +1306,17 @@ private KVMPhysicalDisk createPhysicalDiskByQemuImg(String name, KVMStoragePool
@Override
public boolean connectPhysicalDisk(String name, KVMStoragePool pool, Map details, boolean isVMMigrate) {
// this is for managed storage that needs to prep disks prior to use
+ if (pool.getType() == StoragePoolType.CLVM && isVMMigrate) {
+ logger.info("Activating CLVM volume {} at location: {} in shared mode for VM migration", name, pool.getLocalPath() + File.separator + name);
+ Script activateVolInSharedMode = new Script("lvchange", 5000, logger);
+ activateVolInSharedMode.add("-asy");
+ activateVolInSharedMode.add(pool.getLocalPath() + File.separator + name);
+ String result = activateVolInSharedMode.execute();
+ if (result != null) {
+ logger.error("Failed to activate CLVM volume {} in shared mode for VM migration. Command output: {}", name, result);
+ return false;
+ }
+ }
return true;
}
@@ -1227,7 +1428,16 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag
LibvirtStoragePool libvirtPool = (LibvirtStoragePool)pool;
try {
StorageVol vol = getVolume(libvirtPool.getPool(), uuid);
- logger.debug("Instructing libvirt to remove volume " + uuid + " from pool " + pool.getUuid());
+ if (vol == null) {
+ logger.warn("Volume {} not found in libvirt pool {}, it may have been already deleted", uuid, pool.getUuid());
+
+ // For CLVM, attempt direct LVM cleanup in case the volume exists but libvirt can't see it
+ if (pool.getType() == StoragePoolType.CLVM) {
+ return cleanupCLVMVolume(uuid, pool);
+ }
+ return true;
+ }
+ logger.debug("Instructing libvirt to remove volume {} from pool {}", uuid, pool.getUuid());
if(Storage.ImageFormat.DIR.equals(format)){
deleteDirVol(libvirtPool, vol);
} else {
@@ -1236,10 +1446,83 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag
vol.free();
return true;
} catch (LibvirtException e) {
+ // For CLVM, if libvirt fails, try direct LVM cleanup
+ if (pool.getType() == StoragePoolType.CLVM) {
+ logger.warn("Libvirt failed to delete CLVM volume {}, attempting direct LVM cleanup: {}", uuid, e.getMessage());
+ return cleanupCLVMVolume(uuid, pool);
+ }
throw new CloudRuntimeException(e.toString());
}
}
+ /**
+ * Clean up CLVM volume and its snapshots directly using LVM commands.
+ * This is used as a fallback when libvirt cannot find or delete the volume.
+ */
+ private boolean cleanupCLVMVolume(String uuid, KVMStoragePool pool) {
+ logger.info("Starting direct LVM cleanup for CLVM volume: {} in pool: {}", uuid, pool.getUuid());
+
+ try {
+ String sourceDir = pool.getLocalPath();
+ if (sourceDir == null || sourceDir.isEmpty()) {
+ logger.debug("Source directory is null or empty, cannot determine VG name for CLVM pool {}, skipping direct cleanup", pool.getUuid());
+ return true;
+ }
+ String vgName = getVgName(pool, sourceDir);
+ logger.info("Determined VG name: {} for pool: {}", vgName, pool.getUuid());
+
+ if (vgName == null || vgName.isEmpty()) {
+ logger.warn("Cannot determine VG name for CLVM pool {}, skipping direct cleanup", pool.getUuid());
+ return true;
+ }
+
+ String lvPath = "/dev/" + vgName + "/" + uuid;
+ logger.debug("Volume path: {}", lvPath);
+
+ // Check if the LV exists
+ Script checkLvs = new Script("lvs", 5000, logger);
+ checkLvs.add("--noheadings");
+ checkLvs.add("--unbuffered");
+ checkLvs.add(lvPath);
+
+ logger.info("Checking if volume exists: lvs --noheadings --unbuffered {}", lvPath);
+ String checkResult = checkLvs.execute();
+
+ if (checkResult != null) {
+ logger.info("CLVM volume {} does not exist in LVM (check returned: {}), considering it as already deleted", uuid, checkResult);
+ return true;
+ }
+
+ logger.info("Volume {} exists, proceeding with cleanup", uuid);
+
+ logger.info("Step 1: Zero-filling volume {} for security", uuid);
+ secureZeroFillVolume(lvPath, uuid);
+
+ logger.info("Step 2: Removing volume {}", uuid);
+ Script removeLv = new Script("lvremove", 10000, logger);
+ removeLv.add("-f");
+ removeLv.add(lvPath);
+
+ logger.info("Executing command: lvremove -f {}", lvPath);
+ String removeResult = removeLv.execute();
+
+ if (removeResult == null) {
+ logger.info("Successfully removed CLVM volume {} using direct LVM cleanup", uuid);
+ return true;
+ } else {
+ logger.warn("Command 'lvremove -f {}' returned error: {}", lvPath, removeResult);
+ if (removeResult.contains("not found") || removeResult.contains("Failed to find")) {
+ logger.info("CLVM volume {} not found during cleanup, considering it as already deleted", uuid);
+ return true;
+ }
+ return false;
+ }
+ } catch (Exception ex) {
+ logger.error("Exception during CLVM volume cleanup for {}: {}", uuid, ex.getMessage(), ex);
+ return true; // best-effort cleanup: treat unexpected errors as non-fatal so volume deletion is not blocked
+ }
+ }
+
/**
* This function copies a physical disk from Secondary Storage to Primary Storage
* or from Primary to Primary Storage
@@ -1737,6 +2020,80 @@ private void deleteVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtEx
vol.delete(0);
}
+ /**
+ * Securely zero-fill a volume before deletion to prevent data leakage.
+ * Uses blkdiscard (fast TRIM) as primary method, with dd zero-fill as fallback.
+ *
+ * @param lvPath The full path to the logical volume (e.g., /dev/vgname/lvname)
+ * @param volumeUuid The UUID of the volume for logging purposes
+ */
+ private void secureZeroFillVolume(String lvPath, String volumeUuid) {
+ logger.info("Starting secure zero-fill for CLVM volume: {} at path: {}", volumeUuid, lvPath);
+
+ boolean blkdiscardSuccess = false;
+
+ // Try blkdiscard first (fast - sends TRIM commands)
+ try {
+ Script blkdiscard = new Script("blkdiscard", 300000, logger); // 5 minute timeout
+ blkdiscard.add("-f"); // Force flag to suppress confirmation prompts
+ blkdiscard.add(lvPath);
+
+ String result = blkdiscard.execute();
+ if (result == null) {
+ logger.info("Successfully zero-filled CLVM volume {} using blkdiscard (TRIM)", volumeUuid);
+ blkdiscardSuccess = true;
+ } else {
+ // Check if the error is "Operation not supported" - common with thick LVM without TRIM support
+ if (result.contains("Operation not supported") || result.contains("BLKDISCARD ioctl failed")) {
+ logger.info("blkdiscard not supported for volume {} (device doesn't support TRIM/DISCARD), using dd fallback", volumeUuid);
+ } else {
+ logger.warn("blkdiscard failed for volume {}: {}, will try dd fallback", volumeUuid, result);
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("Exception during blkdiscard for volume {}: {}, will try dd fallback", volumeUuid, e.getMessage());
+ }
+
+ // Fallback to dd zero-fill (slow but thorough)
+ if (!blkdiscardSuccess) {
+ logger.info("Attempting zero-fill using dd for CLVM volume: {}", volumeUuid);
+ try {
+ // Use bash to chain commands with proper error handling
+ // nice -n 19: lowest CPU priority
+ // ionice -c 2 -n 7: best-effort I/O scheduling with lowest priority
+ // oflag=direct: bypass cache for more predictable performance
+ // || true at the end ensures the command doesn't fail even if dd returns error (which it does when disk is full - expected)
+ String command = String.format(
+ "nice -n 19 ionice -c 2 -n 7 dd if=/dev/zero of=%s bs=1M oflag=direct 2>&1 || true",
+ lvPath
+ );
+
+ Script ddZeroFill = new Script("/bin/bash", 3600000, logger); // 60 minute timeout for large volumes
+ ddZeroFill.add("-c");
+ ddZeroFill.add(command);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String ddResult = ddZeroFill.execute(parser);
+ String output = parser.getLines();
+
+ // dd writes to stderr even on success, check for completion indicators
+ if (output != null && (output.contains("copied") || output.contains("records in") ||
+ output.contains("No space left on device"))) {
+ logger.info("Successfully zero-filled CLVM volume {} using dd", volumeUuid);
+ } else if (ddResult == null) {
+ logger.info("Zero-fill completed for CLVM volume {}", volumeUuid);
+ } else {
+ logger.warn("dd zero-fill for volume {} completed with output: {}", volumeUuid,
+ output != null ? output : ddResult);
+ }
+ } catch (Exception e) {
+ // Log warning but don't fail the deletion - zero-fill is a best-effort security measure
+ logger.warn("Failed to zero-fill CLVM volume {} before deletion: {}. Proceeding with deletion anyway.",
+ volumeUuid, e.getMessage());
+ }
+ }
+ }
+
private void deleteDirVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtException {
Script.runSimpleBashScript("rm -r --interactive=never " + vol.getPath());
}
diff --git a/scripts/storage/qcow2/managesnapshot.sh b/scripts/storage/qcow2/managesnapshot.sh
index 3650bdd9b6f6..cba330b6b89e 100755
--- a/scripts/storage/qcow2/managesnapshot.sh
+++ b/scripts/storage/qcow2/managesnapshot.sh
@@ -58,15 +58,11 @@ is_lv() {
}
get_vg() {
- lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 2
+ lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 2 | tr -d ' '
}
get_lv() {
- lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 1
-}
-
-double_hyphens() {
- echo ${1} | sed -e "s/-/--/g"
+ lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 1 | tr -d ' '
}
create_snapshot() {
@@ -77,30 +73,39 @@ create_snapshot() {
islv_ret=$?
if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ]; then
+ # Modern LVM snapshot approach
local lv=`get_lv ${disk}`
local vg=`get_vg ${disk}`
- local lv_dm=`double_hyphens ${lv}`
- local vg_dm=`double_hyphens ${vg}`
- local lvdevice=/dev/mapper/${vg_dm}-${lv_dm}
- local lv_bytes=`blockdev --getsize64 ${lvdevice}`
- local lv_sectors=`blockdev --getsz ${lvdevice}`
-
- lvm lvcreate --size ${lv_bytes}b --name "${snapshotname}-cow" ${vg} >&2 || return 2
- dmsetup suspend ${vg_dm}-${lv_dm} >&2
- if dmsetup info -c --noheadings -o name ${vg_dm}-${lv_dm}-real > /dev/null 2>&1; then
- echo "0 ${lv_sectors} snapshot ${lvdevice}-real /dev/mapper/${vg_dm}-${snapshotname}--cow p 64" | \
- dmsetup create "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
- dmsetup resume "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
- else
- dmsetup table ${vg_dm}-${lv_dm} | dmsetup create ${vg_dm}-${lv_dm}-real >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
- dmsetup resume ${vg_dm}-${lv_dm}-real >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
- echo "0 ${lv_sectors} snapshot ${lvdevice}-real /dev/mapper/${vg_dm}-${snapshotname}--cow p 64" | \
- dmsetup create "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
- echo "0 ${lv_sectors} snapshot-origin ${lvdevice}-real" | \
- dmsetup load ${vg_dm}-${lv_dm} >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
- dmsetup resume "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
+ local lv_bytes=`blockdev --getsize64 ${disk}`
+
+ # Calculate snapshot size (10% of origin size, minimum 100MB, maximum 10GB)
+ local snapshot_size=$((lv_bytes / 10))
+ local min_size=$((100 * 1024 * 1024)) # 100MB
+ local max_size=$((10 * 1024 * 1024 * 1024)) # 10GB
+
+ if [ ${snapshot_size} -lt ${min_size} ]; then
+ snapshot_size=${min_size}
+ elif [ ${snapshot_size} -gt ${max_size} ]; then
+ snapshot_size=${max_size}
+ fi
+
+ # Round to nearest 512-byte multiple (LVM requirement)
+ snapshot_size=$(((snapshot_size + 511) / 512 * 512))
+
+ # Create LVM snapshot using native command
+ lvm lvcreate -L ${snapshot_size}b -s -n "${snapshotname}" "${vg}/${lv}" >&2
+ if [ $? -gt 0 ]; then
+ printf "***Failed to create LVM snapshot ${snapshotname} for ${vg}/${lv}\n" >&2
+ return 2
+ fi
+
+ # Activate the snapshot
+ lvm lvchange --yes -ay "${vg}/${snapshotname}" >&2
+ if [ $? -gt 0 ]; then
+ printf "***Failed to activate LVM snapshot ${snapshotname}\n" >&2
+ lvm lvremove -f "${vg}/${snapshotname}" >&2
+ return 2
fi
- dmsetup resume "${vg_dm}-${lv_dm}" >&2
elif [ -f "${disk}" ]; then
$qemu_img snapshot -c "$snapshotname" $disk
@@ -132,25 +137,22 @@ destroy_snapshot() {
islv_ret=$?
if [ "$islv_ret" == "1" ]; then
+ # Modern LVM snapshot deletion
local lv=`get_lv ${disk}`
local vg=`get_vg ${disk}`
- local lv_dm=`double_hyphens ${lv}`
- local vg_dm=`double_hyphens ${vg}`
- if [ -e /dev/mapper/${vg_dm}-${lv_dm}-real ]; then
- local dm_refcount=`dmsetup info -c --noheadings -o open ${vg_dm}-${lv_dm}-real`
- if [ ${dm_refcount} -le 2 ]; then
- dmsetup suspend ${vg_dm}-${lv_dm} >&2
- dmsetup table ${vg_dm}-${lv_dm}-real | dmsetup load ${vg_dm}-${lv_dm} >&2
- dmsetup resume ${vg_dm}-${lv_dm}
- dmsetup remove "${vg_dm}-${snapshotname}"
- dmsetup remove ${vg_dm}-${lv_dm}-real
- else
- dmsetup remove "${vg_dm}-${snapshotname}"
- fi
- else
- dmsetup remove "${vg_dm}-${snapshotname}"
+
+ # Check if snapshot exists
+ if ! lvm lvs "${vg}/${snapshotname}" > /dev/null 2>&1; then
+ printf "Snapshot ${vg}/${snapshotname} does not exist or was already deleted\n" >&2
+ return 0
+ fi
+
+ # Remove the snapshot using native LVM command
+ lvm lvremove -f "${vg}/${snapshotname}" >&2
+ if [ $? -gt 0 ]; then
+ printf "***Failed to remove LVM snapshot ${vg}/${snapshotname}\n" >&2
+ return 2
fi
- lvm lvremove -f "${vg}/${snapshotname}-cow"
elif [ -f $disk ]; then
#delete all the existing snapshots
$qemu_img snapshot -l $disk |tail -n +3|awk '{print $1}'|xargs -I {} $qemu_img snapshot -d {} $disk >&2
@@ -170,12 +172,37 @@ rollback_snapshot() {
local disk=$1
local snapshotname="$2"
local failed=0
+ is_lv ${disk}
+ islv_ret=$?
- $qemu_img snapshot -a $snapshotname $disk
+ if [ "${dmrollback}" = "yes" ] && [ "$islv_ret" == "1" ]; then
+ # Modern LVM snapshot merge (rollback)
+ local lv=`get_lv ${disk}`
+ local vg=`get_vg ${disk}`
- if [ $? -gt 0 ]
- then
- printf "***Failed to apply snapshot $snapshotname for path $disk\n" >&2
+ # Check if snapshot exists
+ if ! lvm lvs "${vg}/${snapshotname}" > /dev/null 2>&1; then
+ printf "***Snapshot ${vg}/${snapshotname} does not exist\n" >&2
+ return 1
+ fi
+
+ # Use lvconvert --merge to rollback
+ lvm lvconvert --merge "${vg}/${snapshotname}" >&2
+ if [ $? -gt 0 ]; then
+ printf "***Failed to merge/rollback snapshot ${snapshotname} for ${vg}/${lv}\n" >&2
+ return 1
+ fi
+ elif [ -f ${disk} ]; then
+ # File-based snapshot rollback using qemu-img
+ $qemu_img snapshot -a $snapshotname $disk
+
+ if [ $? -gt 0 ]
+ then
+ printf "***Failed to apply snapshot $snapshotname for path $disk\n" >&2
+ failed=1
+ fi
+ else
+ printf "***Failed to rollback snapshot $snapshotname, undefined type $disk\n" >&2
failed=1
fi
@@ -204,20 +231,21 @@ backup_snapshot() {
if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ] ; then
local vg=`get_vg ${disk}`
- local vg_dm=`double_hyphens ${vg}`
local scriptdir=`dirname ${0}`
- if ! dmsetup info -c --noheadings -o name ${vg_dm}-${snapshotname} > /dev/null 2>&1; then
+ # Check if snapshot exists using native LVM command
+ if ! lvm lvs "${vg}/${snapshotname}" > /dev/null 2>&1; then
printf "Disk ${disk} has no snapshot called ${snapshotname}.\n" >&2
return 1
fi
- qemuimg_ret=$($qemu_img $forceShareFlag -f raw -O qcow2 "/dev/mapper/${vg_dm}-${snapshotname}" "${destPath}/${destName}")
+ # Use native LVM path for backup
+ qemuimg_ret=$($qemu_img convert $forceShareFlag -f raw -O qcow2 "/dev/${vg}/${snapshotname}" "${destPath}/${destName}" 2>&1)
ret_code=$?
- if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"snapshot: invalid option -- 'U'"* ]]
+ if [ $ret_code -gt 0 ] && ([[ $qemuimg_ret == *"invalid option"*"'U'"* ]] || [[ $qemuimg_ret == *"unrecognized option"*"'-U'"* ]])
then
forceShareFlag=""
- $qemu_img $forceShareFlag -f raw -O qcow2 "/dev/mapper/${vg_dm}-${snapshotname}" "${destPath}/${destName}"
+ $qemu_img convert $forceShareFlag -f raw -O qcow2 "/dev/${vg}/${snapshotname}" "${destPath}/${destName}"
ret_code=$?
fi
if [ $ret_code -gt 0 ]
@@ -240,9 +268,9 @@ backup_snapshot() {
# Backup VM snapshot
qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk 2>&1)
ret_code=$?
- if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"snapshot: invalid option -- 'U'"* ]]; then
+ if [ $ret_code -gt 0 ] && ([[ $qemuimg_ret == *"invalid option"*"'U'"* ]] || [[ $qemuimg_ret == *"unrecognized option"*"'-U'"* ]]); then
forceShareFlag=""
- qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk)
+ qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk 2>&1)
ret_code=$?
fi
@@ -251,11 +279,11 @@ backup_snapshot() {
return 1
fi
- qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1 > /dev/null)
+ qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1)
ret_code=$?
- if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"convert: invalid option -- 'U'"* ]]; then
+ if [ $ret_code -gt 0 ] && ([[ $qemuimg_ret == *"invalid option"*"'U'"* ]] || [[ $qemuimg_ret == *"unrecognized option"*"'-U'"* ]]); then
forceShareFlag=""
- qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1 > /dev/null)
+ qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1)
ret_code=$?
fi
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 17961dbd955f..02c1bf8ab017 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -130,6 +130,7 @@
import com.cloud.agent.AgentManager;
import com.cloud.agent.api.Answer;
+import org.apache.cloudstack.storage.command.ClvmLockTransferCommand;
import com.cloud.agent.api.ModifyTargetsCommand;
import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
@@ -152,6 +153,7 @@
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceAllocationException;
import com.cloud.exception.StorageUnavailableException;
@@ -2602,21 +2604,42 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device
logger.trace(String.format("is it needed to move the volume: %b?", moveVolumeNeeded));
}
- if (moveVolumeNeeded) {
+ // Check if CLVM lock transfer is needed (even if moveVolumeNeeded is false)
+ // This handles the case where the volume is already on the correct storage pool
+ // but the VM is running on a different host, requiring only a lock transfer
+ boolean isClvmLockTransferNeeded = !moveVolumeNeeded &&
+ isClvmLockTransferRequired(newVolumeOnPrimaryStorage, existingVolumeOfVm, vm);
+
+ if (isClvmLockTransferNeeded) {
+ // CLVM lock transfer - no data copy, no pool change needed
+ newVolumeOnPrimaryStorage = executeClvmLightweightMigration(
+ newVolumeOnPrimaryStorage, vm, existingVolumeOfVm,
+ "CLVM lock transfer", "same pool to different host");
+ } else if (moveVolumeNeeded) {
PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)newVolumeOnPrimaryStorage.getDataStore();
if (primaryStore.isLocal()) {
throw new CloudRuntimeException(
"Failed to attach local data volume " + volumeToAttach.getName() + " to VM " + vm.getDisplayName() + " as migration of local data volume is not allowed");
}
- StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
- try {
- HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
- newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(),
- volumeToAttachHyperType);
- } catch (ConcurrentOperationException | StorageUnavailableException e) {
- logger.debug("move volume failed", e);
- throw new CloudRuntimeException("move volume failed", e);
+ boolean isClvmLightweightMigration = isClvmLightweightMigrationNeeded(
+ newVolumeOnPrimaryStorage, existingVolumeOfVm, vm);
+
+ if (isClvmLightweightMigration) {
+ newVolumeOnPrimaryStorage = executeClvmLightweightMigration(
+ newVolumeOnPrimaryStorage, vm, existingVolumeOfVm,
+ "CLVM lightweight migration", "different pools, same VG");
+ } else {
+ StoragePoolVO vmRootVolumePool = _storagePoolDao.findById(existingVolumeOfVm.getPoolId());
+
+ try {
+ HypervisorType volumeToAttachHyperType = _volsDao.getHypervisorType(volumeToAttach.getId());
+ newVolumeOnPrimaryStorage = _volumeMgr.moveVolume(newVolumeOnPrimaryStorage, vmRootVolumePool.getDataCenterId(), vmRootVolumePool.getPodId(), vmRootVolumePool.getClusterId(),
+ volumeToAttachHyperType);
+ } catch (ConcurrentOperationException | StorageUnavailableException e) {
+ logger.debug("move volume failed", e);
+ throw new CloudRuntimeException("move volume failed", e);
+ }
}
}
VolumeVO newVol = _volsDao.findById(newVolumeOnPrimaryStorage.getId());
@@ -2631,6 +2654,419 @@ private Volume orchestrateAttachVolumeToVM(Long vmId, Long volumeId, Long device
return newVol;
}
+ /**
+ * Helper method to get storage pools for volume and VM.
+ *
+ * @param volumeToAttach The volume being attached
+ * @param vmExistingVolume The VM's existing volume
+ * @return Pair of StoragePoolVO objects (volumePool, vmPool), or null if either pool is missing
+ */
+ private Pair<StoragePoolVO, StoragePoolVO> getStoragePoolsForVolumeAttachment(VolumeInfo volumeToAttach, VolumeVO vmExistingVolume) {
+ if (volumeToAttach == null || vmExistingVolume == null) {
+ return null;
+ }
+
+ StoragePoolVO volumePool = _storagePoolDao.findById(volumeToAttach.getPoolId());
+ StoragePoolVO vmPool = _storagePoolDao.findById(vmExistingVolume.getPoolId());
+
+ if (volumePool == null || vmPool == null) {
+ return null;
+ }
+
+ return new Pair<>(volumePool, vmPool);
+ }
+
+ /**
+ * Checks if both storage pools are CLVM type.
+ *
+ * @param volumePool Storage pool for the volume
+ * @param vmPool Storage pool for the VM
+ * @return true if both pools are CLVM type
+ */
+ private boolean areBothPoolsClvmType(StoragePoolVO volumePool, StoragePoolVO vmPool) {
+ return volumePool.getPoolType() == StoragePoolType.CLVM &&
+ vmPool.getPoolType() == StoragePoolType.CLVM;
+ }
+
+ /**
+ * Checks if a storage pool is CLVM type.
+ *
+ * @param pool Storage pool to check
+ * @return true if pool is CLVM type
+ */
+ private boolean isClvmPool(StoragePoolVO pool) {
+ return pool != null && pool.getPoolType() == StoragePoolType.CLVM;
+ }
+
+ /**
+ * Extracts the Volume Group (VG) name from a CLVM storage pool path.
+ * For CLVM, the path is typically: /vgname
+ *
+ * @param poolPath The storage pool path
+ * @return VG name, or null if path is null
+ */
+ private String extractVgNameFromPath(String poolPath) {
+ if (poolPath == null) {
+ return null;
+ }
+ return poolPath.startsWith("/") ? poolPath.substring(1) : poolPath;
+ }
+
+ /**
+ * Checks if two CLVM storage pools are in the same Volume Group.
+ *
+ * @param volumePool Storage pool for the volume
+ * @param vmPool Storage pool for the VM
+ * @return true if both pools are in the same VG
+ */
+ private boolean arePoolsInSameVolumeGroup(StoragePoolVO volumePool, StoragePoolVO vmPool) {
+ String volumeVgName = extractVgNameFromPath(volumePool.getPath());
+ String vmVgName = extractVgNameFromPath(vmPool.getPath());
+
+ return volumeVgName != null && volumeVgName.equals(vmVgName);
+ }
+
+ /**
+ * Determines if a CLVM volume needs lightweight lock migration instead of full data copy.
+ *
+ * Lightweight migration is needed when:
+ * 1. Volume is on CLVM storage
+ * 2. Source and destination are in the same Volume Group
+ * 3. Only the host/lock needs to change (not the storage pool)
+ *
+ * @param volumeToAttach The volume being attached
+ * @param vmExistingVolume The VM's existing volume (typically root volume)
+ * @param vm The VM to attach the volume to
+ * @return true if lightweight CLVM lock migration should be used
+ */
+ private boolean isClvmLightweightMigrationNeeded(VolumeInfo volumeToAttach, VolumeVO vmExistingVolume, UserVmVO vm) {
+ Pair<StoragePoolVO, StoragePoolVO> pools = getStoragePoolsForVolumeAttachment(volumeToAttach, vmExistingVolume);
+ if (pools == null) {
+ return false;
+ }
+
+ StoragePoolVO volumePool = pools.first();
+ StoragePoolVO vmPool = pools.second();
+
+ if (!areBothPoolsClvmType(volumePool, vmPool)) {
+ return false;
+ }
+
+ if (arePoolsInSameVolumeGroup(volumePool, vmPool)) {
+ String vgName = extractVgNameFromPath(volumePool.getPath());
+ logger.info("CLVM lightweight migration detected: Volume {} is in same VG ({}) as VM {} volumes, " +
+ "only lock transfer needed (no data copy)",
+ volumeToAttach.getUuid(), vgName, vm.getUuid());
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Determines if a CLVM volume requires lock transfer when already on the correct storage pool.
+ *
+ * Lock transfer is needed when:
+ * 1. Volume is already on the same CLVM storage pool as VM's volumes
+ * 2. But the volume lock is held by a different host than where the VM is running
+ * 3. Only the lock needs to change (no pool change, no data copy)
+ *
+ * @param volumeToAttach The volume being attached
+ * @param vmExistingVolume The VM's existing volume (typically root volume)
+ * @param vm The VM to attach the volume to
+ * @return true if CLVM lock transfer is needed (but not full migration)
+ */
+ private boolean isClvmLockTransferRequired(VolumeInfo volumeToAttach, VolumeVO vmExistingVolume, UserVmVO vm) {
+ if (vm == null) {
+ return false;
+ }
+
+ Pair<StoragePoolVO, StoragePoolVO> pools = getStoragePoolsForVolumeAttachment(volumeToAttach, vmExistingVolume);
+ if (pools == null) {
+ return false;
+ }
+
+ StoragePoolVO volumePool = pools.first();
+ StoragePoolVO vmPool = pools.second();
+
+ if (!isClvmPool(volumePool)) {
+ return false;
+ }
+
+ if (volumePool.getId() != vmPool.getId()) {
+ return false;
+ }
+
+ Long volumeLockHostId = findClvmVolumeLockHost(volumeToAttach);
+
+ Long vmHostId = vm.getHostId();
+ if (vmHostId == null) {
+ vmHostId = vm.getLastHostId();
+ }
+
+ if (volumeLockHostId == null) {
+ VolumeVO volumeVO = _volsDao.findById(volumeToAttach.getId());
+ if (volumeVO != null && volumeVO.getState() == Volume.State.Ready && volumeVO.getInstanceId() == null) {
+ logger.debug("CLVM volume {} is detached on same pool as VM {}, lock transfer may be needed",
+ volumeToAttach.getUuid(), vm.getUuid());
+ return true;
+ }
+ }
+
+ if (volumeLockHostId != null && vmHostId != null && !volumeLockHostId.equals(vmHostId)) {
+ logger.info("CLVM lock transfer required: Volume {} lock is on host {} but VM {} is on host {}",
+ volumeToAttach.getUuid(), volumeLockHostId, vm.getUuid(), vmHostId);
+ return true;
+ }
+
+ return false;
+ }
+
+ /**
+ * Determines the destination host for CLVM lock migration.
+ *
+ * If VM is running, uses the VM's current host.
+ * If VM is stopped, picks an available UP host from the storage pool's cluster.
+ *
+ * @param vm The VM
+ * @param vmExistingVolume The VM's existing volume (to determine cluster)
+ * @return Host ID, or null if cannot be determined
+ */
+ private Long determineClvmLockDestinationHost(UserVmVO vm, VolumeVO vmExistingVolume) {
+ Long destHostId = vm.getHostId();
+ if (destHostId != null) {
+ return destHostId;
+ }
+
+ if (vmExistingVolume != null && vmExistingVolume.getPoolId() != null) {
+ StoragePoolVO pool = _storagePoolDao.findById(vmExistingVolume.getPoolId());
+ if (pool != null && pool.getClusterId() != null) {
+ List<HostVO> hosts = _hostDao.findByClusterId(pool.getClusterId());
+ if (hosts != null && !hosts.isEmpty()) {
+ // Pick first available UP host
+ for (HostVO host : hosts) {
+ if (host.getStatus() == Status.Up) {
+ destHostId = host.getId();
+ logger.debug("VM {} is stopped, selected host {} from cluster {} for CLVM lock migration",
+ vm.getUuid(), destHostId, pool.getClusterId());
+ return destHostId;
+ }
+ }
+ }
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Executes CLVM lightweight migration with consistent logging and error handling.
+ *
+ * This helper method wraps the actual migration logic to eliminate code duplication
+ * between different CLVM migration scenarios (lock transfer vs. lightweight migration).
+ *
+ * @param volume The volume to migrate locks for
+ * @param vm The VM to attach the volume to
+ * @param vmExistingVolume The VM's existing volume (to determine target host)
+ * @param operationType Description of the operation type for logging (e.g., "CLVM lock transfer")
+ * @param scenarioDescription Description of the scenario for logging (e.g., "same pool to different host")
+ * @return Updated VolumeInfo after lock migration
+ * @throws CloudRuntimeException if migration fails
+ */
+ private VolumeInfo executeClvmLightweightMigration(VolumeInfo volume, UserVmVO vm, VolumeVO vmExistingVolume,
+ String operationType, String scenarioDescription) {
+ logger.info("Performing {} for volume {} to VM {} ({})",
+ operationType, volume.getUuid(), vm.getUuid(), scenarioDescription);
+
+ try {
+ return performClvmLightweightMigration(volume, vm, vmExistingVolume);
+ } catch (Exception e) {
+ logger.error("{} failed for volume {}: {}",
+ operationType, volume.getUuid(), e.getMessage(), e);
+ throw new CloudRuntimeException(operationType + " failed", e);
+ }
+ }
+
+ /**
+ * Performs lightweight CLVM lock migration for volume attachment.
+ *
+ * This transfers the LVM exclusive lock from the current host to the VM's host
+ * without copying data (since CLVM volumes are on cluster-wide shared storage).
+ *
+ * @param volume The volume to migrate locks for
+ * @param vm The VM to attach the volume to
+ * @param vmExistingVolume The VM's existing volume (to determine target host)
+ * @return Updated VolumeInfo after lock migration
+ * @throws Exception if lock migration fails
+ */
+ private VolumeInfo performClvmLightweightMigration(VolumeInfo volume, UserVmVO vm, VolumeVO vmExistingVolume) throws Exception {
+ String volumeUuid = volume.getUuid();
+ Long vmId = vm.getId();
+
+ logger.info("Starting CLVM lightweight lock migration for volume {} (id: {}) to VM {} (id: {})",
+ volumeUuid, volume.getId(), vm.getUuid(), vmId);
+
+ Long destHostId = determineClvmLockDestinationHost(vm, vmExistingVolume);
+
+ if (destHostId == null) {
+ throw new CloudRuntimeException(
+ "Cannot determine destination host for CLVM lock migration - VM has no host and no available cluster hosts");
+ }
+
+ Long sourceHostId = findClvmVolumeLockHost(volume);
+
+ if (sourceHostId == null) {
+ logger.warn("Could not determine source host for CLVM volume {} lock, " +
+ "assuming volume is not exclusively locked", volumeUuid);
+ sourceHostId = destHostId;
+ }
+
+ if (sourceHostId.equals(destHostId)) {
+ logger.info("CLVM volume {} already has lock on destination host {}, no migration needed",
+ volumeUuid, destHostId);
+ return volume;
+ }
+
+ logger.info("Migrating CLVM volume {} lock from host {} to host {}",
+ volumeUuid, sourceHostId, destHostId);
+
+ boolean success = transferClvmVolumeLock(volume, sourceHostId, destHostId);
+
+ if (!success) {
+ throw new CloudRuntimeException(
+ String.format("Failed to transfer CLVM lock for volume %s from host %s to host %s",
+ volumeUuid, sourceHostId, destHostId));
+ }
+
+ logger.info("Successfully migrated CLVM volume {} lock from host {} to host {}",
+ volumeUuid, sourceHostId, destHostId);
+
+ return volFactory.getVolume(volume.getId());
+ }
+
+ /**
+ * Finds which host currently has the exclusive lock on a CLVM volume.
+ *
+ * @param volume The CLVM volume
+ * @return Host ID that has the exclusive lock, or null if cannot be determined
+ */
+ private Long findClvmVolumeLockHost(VolumeInfo volume) {
+ // Strategy 1: Check volume_details for a host hint we may have stored
+ VolumeDetailVO detail = _volsDetailsDao.findDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID);
+ if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
+ try {
+ return Long.parseLong(detail.getValue());
+ } catch (NumberFormatException e) {
+ logger.warn("Invalid clvmLockHostId in volume_details for volume {}: {}",
+ volume.getUuid(), detail.getValue());
+ }
+ }
+
+ // Strategy 2: If volume was attached to a VM, use that VM's last host
+ Long instanceId = volume.getInstanceId();
+ if (instanceId != null) {
+ VMInstanceVO vmInstance = _vmInstanceDao.findById(instanceId);
+ if (vmInstance != null && vmInstance.getHostId() != null) {
+ return vmInstance.getHostId();
+ }
+ }
+
+ // Strategy 3: Check any host in the pool's cluster
+ StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
+ if (pool != null && pool.getClusterId() != null) {
+ List<HostVO> hosts = _hostDao.findByClusterId(pool.getClusterId());
+ if (hosts != null && !hosts.isEmpty()) {
+ // Return first available UP host
+ for (HostVO host : hosts) {
+ if (host.getStatus() == Status.Up) {
+ return host.getId();
+ }
+ }
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Transfers CLVM volume exclusive lock from source host to destination host.
+ *
+ * @param volume The volume to transfer lock for
+ * @param sourceHostId Host currently holding the lock
+ * @param destHostId Host to transfer lock to
+ * @return true if successful, false otherwise
+ */
+ private boolean transferClvmVolumeLock(VolumeInfo volume, Long sourceHostId, Long destHostId) {
+ String volumeUuid = volume.getUuid();
+
+ // Get storage pool info
+ StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
+ if (pool == null) {
+ logger.error("Cannot find storage pool for volume {}", volumeUuid);
+ return false;
+ }
+
+ String vgName = pool.getPath();
+ if (vgName.startsWith("/")) {
+ vgName = vgName.substring(1);
+ }
+
+ // Full LV path: /dev/vgname/volume-uuid
+ String lvPath = String.format("/dev/%s/%s", vgName, volumeUuid);
+
+ try {
+ // Step 1: Deactivate on source host (if different from dest)
+ if (!sourceHostId.equals(destHostId)) {
+ logger.debug("Deactivating CLVM volume {} on source host {}", volumeUuid, sourceHostId);
+
+ ClvmLockTransferCommand deactivateCmd = new ClvmLockTransferCommand(
+ ClvmLockTransferCommand.Operation.DEACTIVATE,
+ lvPath,
+ volumeUuid
+ );
+
+ Answer deactivateAnswer = _agentMgr.send(sourceHostId, deactivateCmd);
+
+ if (deactivateAnswer == null || !deactivateAnswer.getResult()) {
+ String error = deactivateAnswer != null ? deactivateAnswer.getDetails() : "null answer";
+ logger.warn("Failed to deactivate CLVM volume {} on source host {}: {}. " +
+ "Will attempt to activate on destination anyway.",
+ volumeUuid, sourceHostId, error);
+ }
+ }
+
+ // Step 2: Activate exclusively on destination host
+ logger.debug("Activating CLVM volume {} exclusively on destination host {}", volumeUuid, destHostId);
+
+ ClvmLockTransferCommand activateCmd = new ClvmLockTransferCommand(
+ ClvmLockTransferCommand.Operation.ACTIVATE_EXCLUSIVE,
+ lvPath,
+ volumeUuid
+ );
+
+ Answer activateAnswer = _agentMgr.send(destHostId, activateCmd);
+
+ if (activateAnswer == null || !activateAnswer.getResult()) {
+ String error = activateAnswer != null ? activateAnswer.getDetails() : "null answer";
+ logger.error("Failed to activate CLVM volume {} exclusively on dest host {}: {}",
+ volumeUuid, destHostId, error);
+ return false;
+ }
+
+ // Step 3: Store the new lock host in volume_details for future reference
+ _volsDetailsDao.addDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID, String.valueOf(destHostId), false);
+
+ logger.info("Successfully transferred CLVM lock for volume {} from host {} to host {}",
+ volumeUuid, sourceHostId, destHostId);
+
+ return true;
+
+ } catch (AgentUnavailableException | OperationTimedoutException e) {
+ logger.error("Exception during CLVM lock transfer for volume {}: {}", volumeUuid, e.getMessage(), e);
+ return false;
+ }
+ }
+
public Volume attachVolumeToVM(Long vmId, Long volumeId, Long deviceId, Boolean allowAttachForSharedFS) {
Account caller = CallContext.current().getCallingAccount();
diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java
index b5a03d1836bb..8109442acab4 100755
--- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java
+++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java
@@ -1667,6 +1667,9 @@ public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationExc
if (backupSnapToSecondary) {
if (!isKvmAndFileBasedStorage) {
backupSnapshotToSecondary(payload.getAsyncBackup(), snapshotStrategy, snapshotOnPrimary, payload.getZoneIds(), payload.getStoragePoolIds());
+ if (storagePool.getPoolType() == StoragePoolType.CLVM) {
+ _snapshotStoreDao.removeBySnapshotStore(snapshotId, snapshotOnPrimary.getDataStore().getId(), snapshotOnPrimary.getDataStore().getRole());
+ }
} else {
postSnapshotDirectlyToSecondary(snapshot, snapshotOnPrimary, snapshotId);
}