Skip to content

Commit

Permalink
Set storage quota on storage clients
Browse files Browse the repository at this point in the history
Signed-off-by: Alfonso Martínez <[email protected]>
  • Loading branch information
alfonsomthd committed Jul 15, 2024
1 parent 26510a3 commit 9ca9dee
Show file tree
Hide file tree
Showing 20 changed files with 832 additions and 160 deletions.
38 changes: 31 additions & 7 deletions locales/en/plugin__odf-console.json
Original file line number Diff line number Diff line change
Expand Up @@ -1102,32 +1102,56 @@
"NamespaceStore details": "NamespaceStore details",
"Target Blob Container": "Target Blob Container",
"Num Volumes": "Num Volumes",
"Cluster ID": "Cluster ID",
"<0>The amount of storage allocated to the client cluster for usage.</0><1>Due to simultaneous usage by multiple client clusters, actual available storage may vary affecting your allocated storage quota.</1>": "<0>The amount of storage allocated to the client cluster for usage.</0><1>Due to simultaneous usage by multiple client clusters, actual available storage may vary affecting your allocated storage quota.</1>",
"No storage clients found.": "No storage clients found.",
"You do not have any storage clients connected to this Data Foundation provider cluster.": "You do not have any storage clients connected to this Data Foundation provider cluster.",
"To connect a storage client to the Data Foundation provider cluster, click <2>Generate client onboarding token</2> and use the token to deploy the client cluster.": "To connect a storage client to the Data Foundation provider cluster, click <2>Generate client onboarding token</2> and use the token to deploy the client cluster.",
"Cluster name (ID)": "Cluster name (ID)",
"Storage quota": "Storage quota",
"Used capacity is the amount of storage consumed by the client.": "Used capacity is the amount of storage consumed by the client.",
"Openshift version": "Openshift version",
"Data Foundation version": "Data Foundation version",
"Last heartbeat": "Last heartbeat",
"ago": "ago",
"Client version is out of date": "Client version is out of date",
"Due to the mismatch in the client and provider version this provider cluster cannot be upgraded.": "Due to the mismatch in the client and provider version this provider cluster cannot be upgraded.",
"Edit storage quota": "Edit storage quota",
"Delete storage client": "Delete storage client",
"Storage clients": "Storage clients",
"Generate client onboarding token": "Generate client onboarding token",
"Rotate signing keys": "Rotate signing keys",
"Data Foundation version sync": "Data Foundation version sync",
"Client onboarding token": "Client onboarding token",
"Add storage capacity for the client cluster to consume from the provider cluster.": "Add storage capacity for the client cluster to consume from the provider cluster.",
"Can not generate an onboarding token at the moment": "Can not generate an onboarding token at the moment",
"The token generation service is currently unavailable. Contact our <2>customer support</2> for further help.": "The token generation service is currently unavailable. Contact our <2>customer support</2> for further help.",
"Generating token": "Generating token",
"Generate token": "Generate token",
"Unlimited": "Unlimited",
"Custom": "Custom",
"Storage quota:": "Storage quota:",
"Limit the amount of storage that a client cluster can consume.": "Limit the amount of storage that a client cluster can consume.",
"Allocate quota": "Allocate quota",
"Storage quota cannot be decreased. Assign a quota higher than your current allocation.": "Storage quota cannot be decreased. Assign a quota higher than your current allocation.",
"No specific limit on storage that a client can consume.": "No specific limit on storage that a client can consume.",
"Changing the storage quota from unlimited to custom is not supported after the client cluster is onboarded.": "Changing the storage quota from unlimited to custom is not supported after the client cluster is onboarded.",
"unlimited": "unlimited",
"Generated on": "Generated on",
"On an OpenShift cluster, deploy the Data Foundation client operator using the generated token. The token includes an <2>{quotaText}</2> storage quota for client consumption.": "On an OpenShift cluster, deploy the Data Foundation client operator using the generated token. The token includes an <2>{quotaText}</2> storage quota for client consumption.",
"Copy to clipboard": "Copy to clipboard",
"How to use this token?": "How to use this token?",
"An onboarding token is needed to connect an additional OpenShift cluster to a Data Foundation deployment. Copy the generated token and use it for deploying Data Foundation client operator on your OpenShift cluster.": "An onboarding token is needed to connect an additional OpenShift cluster to a Data Foundation deployment. Copy the generated token and use it for deploying Data Foundation client operator on your OpenShift cluster.",
"This token is valid for 48 hours and can only be used once.": "This token is valid for 48 hours and can only be used once.",
"This token is for one-time use only and is valid for 48 hours.": "This token is for one-time use only and is valid for 48 hours.",
"Permanently delete storage client?": "Permanently delete storage client?",
"Deleting the storage client {getName(resource)} will remove all Ceph/Rook resources and erase all data associated with this client, leading to permanent deletion of the client. This action cannot be undone. It will destroy all pods, services and other objects in the namespace <4>{{name}}</4>.": "Deleting the storage client {getName(resource)} will remove all Ceph/Rook resources and erase all data associated with this client, leading to permanent deletion of the client. This action cannot be undone. It will destroy all pods, services and other objects in the namespace <4>{{name}}</4>.",
"Confirm deletion by typing&nbsp;<1>{{name}}</1> below:": "Confirm deletion by typing&nbsp;<1>{{name}}</1> below:",
"Deleting the storage client <2>{getName(resource)}</2> will remove all Ceph/Rook resources and erase all data associated with this client, leading to permanent deletion of the client. This action cannot be undone. It will destroy all pods, services and other objects in the namespace <5>{{name}}</5>.": "Deleting the storage client <2>{getName(resource)}</2> will remove all Ceph/Rook resources and erase all data associated with this client, leading to permanent deletion of the client. This action cannot be undone. It will destroy all pods, services and other objects in the namespace <5>{{name}}</5>.",
"Confirm deletion by typing <2>{{name}}</2> below:": "Confirm deletion by typing <2>{{name}}</2> below:",
"Enter name": "Enter name",
"Type client name to confirm": "Type client name to confirm",
"This action will rotate the signing key currently used for generating and validating client onboarding tokens.": "This action will rotate the signing key currently used for generating and validating client onboarding tokens.",
"Upon rotation, the existing signing key will be revoked and replaced with a new one.": "Upon rotation, the existing signing key will be revoked and replaced with a new one.",
"Confirm": "Confirm",
"Storage quota request failed. Make sure your Data Foundation provider cluster has enough capacity before trying again.": "Storage quota request failed. Make sure your Data Foundation provider cluster has enough capacity before trying again.",
"Save changes": "Save changes",
"Cluster capacity not available at this moment.": "Cluster capacity not available at this moment.",
"Available capacity": "Available capacity",
"Raw Capacity": "Raw Capacity",
"Add Capacity": "Add Capacity",
"Cluster details": "Cluster details",
Expand Down Expand Up @@ -1193,6 +1217,7 @@
"Storage capacity utilised from the external object storage provider.": "Storage capacity utilised from the external object storage provider.",
"<0>What are the different performance profiles I can use to configure performance?</0><1>Performance profiles types:</1><2><0>Balanced mode:</0> Optimized for right amount of CPU and memory resources to support diverse workloads.</2><3><0>Lean mode:</0> Minimizes resource consumption by allocating fewer CPUs and less memory for resource-efficient operations.</3><4><0>Performance mode:</0> Tailored for high-performance, allocating ample CPUs and memory to ensure optimal execution of demanding workloads.</4>": "<0>What are the different performance profiles I can use to configure performance?</0><1>Performance profiles types:</1><2><0>Balanced mode:</0> Optimized for right amount of CPU and memory resources to support diverse workloads.</2><3><0>Lean mode:</0> Minimizes resource consumption by allocating fewer CPUs and less memory for resource-efficient operations.</3><4><0>Performance mode:</0> Tailored for high-performance, allocating ample CPUs and memory to ensure optimal execution of demanding workloads.</4>",
"For enhanced performance of the Data Foundation cluster, the number of CPUs and memory resources are determined based on the cluster environment, size and various other factors.": "For enhanced performance of the Data Foundation cluster, the number of CPUs and memory resources are determined based on the cluster environment, size and various other factors.",
"An onboarding token to authenticate and authorize an OpenShift cluster, granting access to the Data Foundation deployment, thus establishing a secure connection.": "An onboarding token to authenticate and authorize an OpenShift cluster, granting access to the Data Foundation deployment, thus establishing a secure connection.",
"Backing Store": "Backing Store",
"Bucket Class": "Bucket Class",
"Namespace Store": "Namespace Store",
Expand Down Expand Up @@ -1240,7 +1265,6 @@
"and": "and",
"GiB RAM": "GiB RAM",
"Configure Performance": "Configure Performance",
"Save changes": "Save changes",
"hr": "hr",
"min": "min",
"Select at least 2 Backing Store resources": "Select at least 2 Backing Store resources",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,11 @@ import {
import { ConfigMapModel } from '@odf/shared/models';
import { ConfigMapKind } from '@odf/shared/types';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { getInstantVectorStats, humanizeBinaryBytes } from '@odf/shared/utils';
import { humanizeBinaryBytes, parser } from '@odf/shared/utils';
import { useK8sWatchResource } from '@openshift-console/dynamic-plugin-sdk';
import { TFunction } from 'i18next';
import { Trans } from 'react-i18next';
import { useParams } from 'react-router-dom-v5-compat';
import { compose } from 'redux';
import {
Card,
CardBody,
Expand All @@ -40,8 +39,6 @@ import {
} from '../../../queries/ceph-storage';
import { ODFSystemParams } from '../../../types';

const parser = compose((val) => val?.[0]?.y, getInstantVectorStats);

const calculateDaysUp = (timespan: number): number | null => {
const daysPassed: number = timespan / (60 * 60 * 24);

Expand Down
Original file line number Diff line number Diff line change
@@ -1,53 +1,24 @@
import * as React from 'react';
import { useRawCapacity } from '@odf/core/hooks';
import { useODFSystemFlagsSelector } from '@odf/core/redux';
import {
useCustomPrometheusPoll,
usePrometheusBasePath,
} from '@odf/shared/hooks/custom-prometheus-poll';
import { useCustomTranslation } from '@odf/shared/useCustomTranslationHook';
import { getInstantVectorStats } from '@odf/shared/utils';
import { parser } from '@odf/shared/utils';
import { useParams } from 'react-router-dom-v5-compat';
import { compose } from 'redux';
import {
CAPACITY_INFO_QUERIES,
StorageDashboardQuery,
} from '../../../queries/ceph-storage';
import { ODFSystemParams } from '../../../types';
import {
CapacityCard,
CapacityCardProps,
} from '../../common/capacity-card/capacity-card';

// Enhance instantVectorStats to directly parse the values (else loading state won't be accurate)
const parser = compose((val) => val?.[0]?.y, getInstantVectorStats);

const RawCapacityCard: React.FC = () => {
const { t } = useCustomTranslation();

const { namespace: clusterNs } = useParams<ODFSystemParams>();
const { systemFlags } = useODFSystemFlagsSelector();
const managedByOCS = systemFlags[clusterNs]?.ocsClusterName;

const [totalCapacity, totalError, totalLoading] = useCustomPrometheusPoll({
query:
CAPACITY_INFO_QUERIES(managedByOCS)[
StorageDashboardQuery.RAW_CAPACITY_TOTAL
],
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
});
const [usedCapacity, usedError, usedLoading] = useCustomPrometheusPoll({
query:
CAPACITY_INFO_QUERIES(managedByOCS)[
StorageDashboardQuery.RAW_CAPACITY_USED
],
endpoint: 'api/v1/query' as any,
basePath: usePrometheusBasePath(),
});

const loadError = totalError || usedError;
const clusterName = systemFlags[clusterNs]?.ocsClusterName;

const loading = usedLoading || totalLoading;
const [totalCapacity, usedCapacity, loading, loadError] =
useRawCapacity(clusterName);

const totalCapacityMetric = parser(totalCapacity);
const usedCapacityMetric = parser(usedCapacity);
Expand Down
14 changes: 14 additions & 0 deletions packages/odf/components/storage-consumers/client-list.scss
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
// Empty-state styling for the storage clients list, shown when no client
// clusters are connected to the Data Foundation provider cluster.

// Push the empty-state message down from the top of the list area.
.odf-storage-client-list__no-client-msg {
  margin-top: 12vh;
}

// Muted, centered message text; fixed viewport-relative width keeps the
// paragraph from stretching across very wide screens.
.odf-storage-client-list__no-client-msg-text {
  color: var(--pf-v5-global--disabled-color--100);
  text-align: center;
  width: 43vw;
}

// Size the empty-state icon using the PatternFly v5 xl icon token so it
// scales consistently with the rest of the PatternFly UI.
.odf-storage-client-list__no-client-msg-icon {
  width: var(--pf-v5-global--icon--FontSize--xl);
  height: var(--pf-v5-global--icon--FontSize--xl);
}
Loading

0 comments on commit 9ca9dee

Please sign in to comment.