diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java
index abc674ff0f9a..7867c685bbab 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java
@@ -77,19 +77,24 @@ public class StoragePoolResponse extends BaseResponseWithAnnotations {
     @Param(description = "the name of the cluster for the storage pool")
     private String clusterName;
 
+    @SerializedName(ApiConstants.CAPACITY_BYTES)
+    @Param(description = "bytes CloudStack can provision from this storage pool", since = "4.22.0")
+    private Long capacityBytes;
+
+    @Deprecated(since = "4.22.0")
     @SerializedName("disksizetotal")
     @Param(description = "the total disk size of the storage pool")
     private Long diskSizeTotal;
 
     @SerializedName("disksizeallocated")
-    @Param(description = "the host's currently allocated disk size")
+    @Param(description = "the pool's currently allocated disk size")
     private Long diskSizeAllocated;
 
     @SerializedName("disksizeused")
-    @Param(description = "the host's currently used disk size")
+    @Param(description = "the pool's currently used disk size")
     private Long diskSizeUsed;
 
-    @SerializedName("capacityiops")
+    @SerializedName(ApiConstants.CAPACITY_IOPS)
     @Param(description = "IOPS CloudStack can provision from this storage pool")
     private Long capacityIops;
 
@@ -288,6 +293,14 @@ public void setClusterName(String clusterName) {
         this.clusterName = clusterName;
     }
 
+    public Long getCapacityBytes() {
+        return capacityBytes;
+    }
+
+    public void setCapacityBytes(Long capacityBytes) {
+        this.capacityBytes = capacityBytes;
+    }
+
     public Long getDiskSizeTotal() {
         return diskSizeTotal;
     }
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
index 54f3c63f8d73..1acaccf09df4 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
@@ -24,8 +24,8 @@
 import com.cloud.storage.StoragePool;
 
 public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
-    public static final String CAPACITY_BYTES = "capacityBytes";
-    public static final String CAPACITY_IOPS = "capacityIops";
+    String CAPACITY_BYTES = "capacityBytes";
+    String CAPACITY_IOPS = "capacityIops";
 
     void updateStoragePool(StoragePool storagePool, Map<String, String> details);
     void enableStoragePool(DataStore store);
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
index 8b230d03154e..6da02d7716b1 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
@@ -320,6 +320,9 @@ public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, Li
         pool = super.persist(pool);
         if (details != null) {
             for (Map.Entry<String, String> detail : details.entrySet()) {
+                if (detail.getKey().toLowerCase().contains("password") || detail.getKey().toLowerCase().contains("token")) {
+                    displayDetails = false;
+                }
                 StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue(), displayDetails);
                 _detailsDao.persist(vo);
             }
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql
index 4fcb2b75de5f..cc5b8140cb15 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql
@@ -28,3 +28,7 @@ CALL `cloud`.`IDEMPOTENT_CHANGE_COLUMN`('cloud.domain_router', 'scripts_version'
 
 -- Add the column cross_zone_instance_creation to cloud.backup_repository. if enabled it means that new Instance can be created on all Zones from Backups on this Repository.
 CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_repository', 'cross_zone_instance_creation', 'TINYINT(1) DEFAULT NULL COMMENT ''Backup Repository can be used for disaster recovery on another zone''');
+
+-- Updated display to false for password/token detail of the storage pool details
+UPDATE `cloud`.`storage_pool_details` SET display = 0 WHERE name LIKE '%password%';
+UPDATE `cloud`.`storage_pool_details` SET display = 0 WHERE name LIKE '%token%';
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
index 5e9891ef9895..1be52ad65776 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
@@ -85,8 +85,7 @@ public class PrimaryDataStoreHelper {
     DataStoreProviderManager dataStoreProviderMgr;
 
     public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) {
-        if(params == null)
-        {
+        if (params == null) {
             throw new InvalidParameterValueException("createPrimaryDataStore: Input params is null, please check");
         }
         StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid());
diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
index b3cf825ea592..d1ad7af4499c 100644
--- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
+++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
@@ -18,6 +18,7 @@
  */
 package org.apache.cloudstack.storage.datastore.lifecycle;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -139,7 +140,6 @@ public DataStore initialize(Map<String, Object> dsInfos) {
         Long clusterId = (Long)dsInfos.get("clusterId");
         Long podId = (Long)dsInfos.get("podId");
         Long zoneId = (Long)dsInfos.get("zoneId");
-        String url = (String)dsInfos.get("url");
         String providerName = (String)dsInfos.get("providerName");
         HypervisorType hypervisorType = (HypervisorType)dsInfos.get("hypervisorType");
         if (clusterId != null && podId == null) {
@@ -148,19 +148,43 @@
 
         PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
 
-        String tags = (String)dsInfos.get("tags");
-        String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
         Map<String, String> details = (Map<String, String>)dsInfos.get("details");
+        if (dsInfos.get("capacityBytes") != null) {
+            Long capacityBytes = (Long)dsInfos.get("capacityBytes");
+            if (capacityBytes <= 0) {
+                throw new IllegalArgumentException("'capacityBytes' must be greater than 0.");
+            }
+            if (details == null) {
+                details = new HashMap<>();
+            }
+            details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, String.valueOf(capacityBytes));
+            parameters.setCapacityBytes(capacityBytes);
+        }
+
+        if (dsInfos.get("capacityIops") != null) {
+            Long capacityIops = (Long)dsInfos.get("capacityIops");
+            if (capacityIops <= 0) {
+                throw new IllegalArgumentException("'capacityIops' must be greater than 0.");
+            }
+            if (details == null) {
+                details = new HashMap<>();
+            }
+            details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, String.valueOf(capacityIops));
+            parameters.setCapacityIops(capacityIops);
+        }
+        parameters.setDetails(details);
+
+        String tags = (String)dsInfos.get("tags");
         parameters.setTags(tags);
-        parameters.setStorageAccessGroups(storageAccessGroups);
         parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
-        parameters.setDetails(details);
+
+        String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
+        parameters.setStorageAccessGroups(storageAccessGroups);
 
         String scheme = dsInfos.get("scheme").toString();
         String storageHost = dsInfos.get("host").toString();
         String hostPath = dsInfos.get("hostPath").toString();
-        String uri = String.format("%s://%s%s", scheme, storageHost, hostPath);
 
         Object localStorage = dsInfos.get("localStorage");
         if (localStorage != null) {
diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
index ce38727e42e5..8bfce47b1204 100644
--- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
+++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
@@ -40,6 +40,7 @@
 import com.cloud.api.ApiDBUtils;
 import com.cloud.api.query.vo.StoragePoolJoinVO;
 import com.cloud.capacity.CapacityManager;
+import com.cloud.server.ResourceTag;
 import com.cloud.storage.DataStoreRole;
 import com.cloud.storage.ScopeType;
 import com.cloud.storage.Storage;
@@ -152,6 +153,7 @@ public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool, boolea
                 }
             }
         }
+        poolResponse.setCapacityBytes(pool.getCapacityBytes());
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
         poolResponse.setDiskSizeUsed(pool.getUsedBytes());
@@ -180,6 +182,8 @@ public StoragePoolResponse newStoragePoolResponse(StoragePoolJoinVO pool, boolea
         poolResponse.setIsTagARule(pool.getIsTagARule());
         poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
         poolResponse.setManaged(storagePool.isManaged());
+        Map<String, String> details = ApiDBUtils.getResourceDetails(pool.getId(), ResourceTag.ResourceObjectType.Storage);
+        poolResponse.setDetails(details);
 
         // set async job
         if (pool.getJobId() != null) {
@@ -252,6 +256,7 @@ public StoragePoolResponse newStoragePoolForMigrationResponse(StoragePoolJoinVO
         }
 
         long allocatedSize = pool.getUsedCapacity();
+        poolResponse.setCapacityBytes(pool.getCapacityBytes());
         poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
         poolResponse.setDiskSizeAllocated(allocatedSize);
         poolResponse.setCapacityIops(pool.getCapacityIops());
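
Reviewer note: the sketch below is a minimal, standalone illustration (not part of this patch) of the masking rule that PrimaryDataStoreDaoImpl.persist() now applies, namely that storage pool details whose keys contain "password" or "token" are persisted with display = false. The class name, helper method, and sample keys are all hypothetical, and the helper assumes the check is intended per detail key.

import java.util.Map;

// Hypothetical, illustration-only class; not part of CloudStack.
public class StoragePoolDetailMaskingSketch {

    // Returns the display flag to persist for a given detail key:
    // keys containing "password" or "token" are never displayed.
    static boolean displayFor(String detailKey, boolean displayDetails) {
        String key = detailKey.toLowerCase();
        if (key.contains("password") || key.contains("token")) {
            return false;
        }
        return displayDetails;
    }

    public static void main(String[] args) {
        // Sample keys are illustrative only.
        Map<String, Boolean> samples = Map.of(
                "pool.uuid", true,
                "chap.initiator.password", true,
                "access.token", true);
        // Only "pool.uuid" should keep display=true.
        samples.forEach((key, display) ->
                System.out.printf("%s -> display=%b%n", key, displayFor(key, display)));
    }
}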