diff --git a/shell/assets/translations/en-us.yaml b/shell/assets/translations/en-us.yaml index b950a4f1489..7aef96c5434 100644 --- a/shell/assets/translations/en-us.yaml +++ b/shell/assets/translations/en-us.yaml @@ -101,7 +101,7 @@ generic: deprecated: Deprecated placeholder: "e.g. {text}" - moreInfo: More Info + moreInfo: More Information selectors: label: Selector matchingResources: @@ -202,7 +202,7 @@ nav: restoreCards: Restore hidden cards userMenu: preferences: Preferences - accountAndKeys: Account & API Keys + accountAndKeys: Account and API Keys logOut: Log Out failWhale: authMiddleware: Auth Middleware @@ -216,7 +216,7 @@ nav: product: apps: Apps - auth: Users & Authentication + auth: Users and Authentication backup: Rancher Backups cis: CIS Benchmark ecm: Cluster Manager @@ -350,10 +350,10 @@ accountAndKeys: accessKey: Access Key secretKey: Secret Key bearerToken: Bearer Token - saveWarning: Save the info above! This is the only time you'll be able to see it. If you lose it, you'll need to create a new API key. - keyCreated: A new API Key has been created - bearerTokenTip: "Access Key and Secret Key can be sent as the username and password for HTTP Basic auth to authorize requests. You can also combine them to use as a Bearer token:" - ttlLimitedWarning: The Expiry time for this API Key was reduced due to system configuration + saveWarning: Save the above information! This is the only time you will be able to see it. If you lose it, you will need to create a new API key. + keyCreated: A new API key has been created + bearerTokenTip: "Access Key and Secret Key can be sent as the username and password for HTTP basic authentication to authorize requests. 
You can also combine them to use as a Bearer token:" + ttlLimitedWarning: The Expiry time for this API key was reduced due to system configuration addClusterMemberDialog: title: Add Cluster Member @@ -368,8 +368,8 @@ addProjectMemberDialog: authConfig: accessMode: label: 'Configure who should be able to login and use {vendor}' - required: Restrict access to only the authorized users & groups - restricted: 'Allow members of clusters and projects, plus authorized users & groups' + required: Restrict access to only the authorized users and groups + restricted: 'Allow members of clusters and projects, plus authorized users and groups' unrestricted: Allow any valid user allowedPrincipalIds: title: Authorized Users & Groups @@ -397,7 +397,7 @@ authConfig: 3:
{ns}
namespace that has an encryption-provider-config.yaml
key. {namespace}
, already exists and cannot be added to a different project."
- project: Install into Project
+ project: Install Into Project
section:
chartOptions: Edit Options
valuesYaml: Edit YAML
@@ -1007,8 +1007,8 @@ catalog:
} the {existing, select,
true { app}
false { chart}
- }. Start by setting some basic information used by {vendor} to manage the App.
- nsCreationDescription: "To install the app into a new namespace enter it's name in the Namespace field and select it."
+ }. Start by setting some basic information used by {vendor} to manage the application.
+ nsCreationDescription: "To install the application into a new namespace, enter the name in the Namespace field and select it."
createNamespace: "Namespace {namespace}
will be created."
clusterTplVersion:
label: Version
@@ -1016,19 +1016,19 @@ catalog:
description: Select a version of the Cluster Template
clusterTplValues:
label: Values
- subtext: Change how the Cluster is defined
- description: Configure Values used by Helm that help define the Cluster.
+ subtext: Change how the cluster is defined
+ description: Configure Values used by Helm that help define the cluster.
helmValues:
label: Values
- subtext: Change how the App works
- description: Configure Values used by Helm that help define the App.
+ subtext: Change how the application works
+ description: Configure values used by Helm that help define the application.
chartInfo:
- button: View Chart Info
- label: Chart Info
+ button: View Chart Information
+ label: Chart Information
helmCli:
- checkbox: Customize Helm options before install
+ checkbox: Customize Helm options before installation
label: Helm Options
- subtext: Change how the app is deployed
+ subtext: Change how the application is deployed
description: Supply additional deployment options
version: Version
versions:
@@ -1050,7 +1050,7 @@ catalog:
gitBranch:
label: Git Branch
placeholder: e.g. master
- defaultMessage: 'Will default to "master" if left blank'
+ defaultMessage: 'The branch will default to "master" if left blank'
gitRepo:
label: Git Repo URL
placeholder: 'e.g. https://github.com/your-company/charts.git'
@@ -1227,7 +1227,7 @@ cluster:
configuration: Multus
agentEnvVars:
label: Agent Environment
- detail: Add additional environment variables to the agent container. This is most commonly useful for configuring a HTTP proxy.
+ detail: Add additional environment variables to the agent container. This is most commonly useful for configuring an HTTP proxy.
keyLabel: Variable Name
cloudProvider:
aws:
@@ -1252,7 +1252,7 @@ cluster:
warning: The cluster needs to have at least one node with each role to be usable.
advanced:
label: Advanced
- detail: Additional control over how the node will be registered. These values will often need to be different for each node registered.
+ detail: Additional control over how the node will be registered. These values will often need to be different for each node registered.
nodeName: Node Name
publicIp: Node Public IP
privateIp: Node Private IP
@@ -1265,14 +1265,14 @@ cluster:
windowsDetail: Run this command in PowerShell on each of the existing Windows machines you want to register. Windows nodes can only be workers.
windowsNotReady: The cluster must be up and running with Linux etcd, control plane, and worker nodes before the registration command for adding Windows workers will display.
windowsWarning: Workload pods, including some deployed by Rancher charts, will be scheduled on both Linux and Windows nodes by default. Edit NodeSelector in the chart to direct them to be placed onto a compatible node.
- windowsDeprecatedForRKE1: Windows support is being deprecated for RKE1. We suggest migrating to RKE2.
+ windowsDeprecatedForRKE1: Windows support is being deprecated for RKE1 and RKE1 is soon to be deprecated. Please migrate to RKE2.
insecure: "Insecure: Select this to skip TLS verification if your server has a self-signed certificate."
credential:
banner:
createCredential: |-
{length, plural,
- =0 {First you'll need to create a credential to talk to the cloud provider}
- other {Ok, Let's create a new credential}
+ =0 {First, you will need to create a credential to talk to the cloud provider}
+ other {Ok, start creating a new credential}
}
selectExisting:
label: Select Existing
@@ -1285,7 +1285,7 @@ cluster:
label: Access Key
placeholder: Your AWS Access Key
defaultRegion:
- help: The default region to use when creating clusters. Also contacted to verify that this credential works.
+ help: The default region to use when creating clusters. Also contacted to verify that this credential works.
label: Default Region
secretKey:
label: Secret Key
@@ -1395,7 +1395,7 @@ cluster:
volume: Volume
imageVolume: Image Volume
addVolume: Add Volume
- addVMImage: Add VM Image
+ addVMImage: Add Virtual Machine Image
storageClass: Storage Class
sshUser: SSH User
userData:
@@ -1412,9 +1412,9 @@ cluster:
tokenExpirationWarning: 'Warning: Harvester Cloud Credentials use an underlying authentication token that may have an expiry time - please see the following knowledge base article for possible implications on management operations.'
description:
label: Cluster Description
- placeholder: Any text you want that better describes this cluster
+ placeholder: Any text to describe this cluster
harvester:
- importNotice: Import Harvester Clusters via
+ importNotice: Import Harvester Clusters Via
warning:
label: This is a Harvester Cluster - enable the Harvester feature flag to manage it
state: Warning
@@ -1447,11 +1447,11 @@ cluster:
sshUser:
placeholder: e.g. ubuntu
toolTip: SSH user to login with the selected OS image.
- haveOneOwner: There must be at least one member with the Owner role.
+ haveOneOwner: There must be at least one member with the owner role.
import:
warningBanner: 'You should not import a cluster which has already been connected to another instance of Rancher as it will lead to data corruption.'
commandInstructions: 'Run the kubectl
command below on an existing Kubernetes cluster running a supported Kubernetes version to import it into {vendor}:'
- commandInstructionsInsecure: 'If you get a "certificate signed by unknown authority" error, your {vendor} installation has a self-signed or untrusted SSL certificate. Run the command below instead to bypass the certificate verification:'
+ commandInstructionsInsecure: 'If you get a "certificate signed by unknown authority" error, your {vendor} installation has a self-signed or untrusted SSL certificate. Run the command below instead to bypass the certificate verification:'
clusterRoleBindingInstructions: 'If you get permission errors creating some of the resources, your user may not have the cluster-admin
role. Use this command to apply it:'
clusterRoleBindingCommand: 'kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user /etc/alertmanager/secrets/label: Additional Secrets - existing: Choose an existing config secret + existing: Choose an existing configuration secret info: | Create default config:
cattle-monitoring-systemnamespace on deploying this chart under the name
alertmanager-rancher-monitoring-alertmanager. By default, this Secret will never be modified on an uninstall or upgrade of this chart.
cattle-monitoring-systemnamespace. If the namespace does not exist, you will not be able to select an existing secret. label: Alertmanager Secret - new: Create default config + new: Create default configuration radio: - label: Config Secret + label: Configuration Secret validation: duplicatedReceiverName: A receiver with the name {name} already exists. templates: @@ -3476,7 +3476,7 @@ monitoring: adminApi: Admin API evaluation: Evaluation Interval ignoreNamespaceSelectors: - help: 'Ignoring Namespace Selectors allows Cluster Admins to limit teams from monitoring resources outside of namespaces they have permissions to but can break the functionality of Apps that rely on setting up Monitors that scrape targets across multiple namespaces, such as Istio.' + help: 'Ignoring Namespace Selectors allows cluster admins to limit teams from monitoring resources outside of namespaces they have permissions to but can break the functionality of applications that rely on setting up monitors that scrape targets across multiple namespaces, such as Istio.' label: Namespace Selectors radio: enforced: 'Use: Monitors can access resources based on namespaces that match the namespace selector field' @@ -3496,13 +3496,13 @@ monitoring: label: Persistent Storage for Prometheus mode: Access Mode selector: Selector - selectorWarning: 'If you are using a dynamic provisioner (e.g. Longhorn), no Selectors should be specified since a PVC with a non-empty selector can''t have a PV dynamically provisioned for it.' + selectorWarning: 'If you are using a dynamic provisioner (e.g. Longhorn), no selectors should be specified since a PVC with a non-empty selector cannot have a PV dynamically provisioned for it.' size: Size volumeName: Volume Name title: Configure Prometheus warningInstalled: | Warning: Prometheus Operators are currently deployed. Deploying multiple Prometheus Operators onto one cluster is not currently supported. 
Please remove all other Prometheus Operator deployments from this cluster before trying to install this chart. - If you are migrating from an older version of {vendor} with Monitoring enabled, please disable Monitoring on this cluster completely before attempting to install this chart. + If you are migrating from an older version of {vendor} with monitoring enabled, please disable monitoring on this cluster completely before attempting to install this chart. receiver: addReceiver: Add Receiver fields: @@ -3521,13 +3521,13 @@ monitoring: secretsBanner: The file paths below must be referenced in
alertmanager.alertmanagerSpec.secretswhen deploying the Monitoring chart. For more information see our documentation. projectMonitoring: detail: - error: "Unable to fetch Dashboard values with status: " + error: "Unable to fetch dashboard values with status: " list: - banner: Project Monitoring Configuration is stored in ProjectHelmChart resources + banner: Project monitoring configuration is stored in ProjectHelmChart resources empty: - message: Project Monitoring has not been configured for any projects - canCreate: Get started by clicking Create to add monitoring to a project - cannotCreate: Contact the admin to add project monitoring + message: Project monitoring has not been configured for any projects + canCreate: Get started by clicking create to add monitoring to a project + cannotCreate: Contact the administrator to add project monitoring route: label: Route fields: @@ -3542,9 +3542,9 @@ monitoring: alertmanagerConfig: description: Routes and receivers for project alerting and cluster alerting are configured within AlertmanagerConfig resources. empty: Alerts have not been configured for any accessible namespaces. - getStarted: Get started by clicking Create to configure an alert. + getStarted: Get started by clicking create to configure an alert. receiverTooltip: This route will direct alerts to the selected receiver, which must be defined in the same AlertmanagerConfig. - deprecationWarning: The Route and Receiver resources are deprecated. Going forward, routes and receivers should not be managed as separate Kubernetes resources on this page. They should be configured as YAML fields in an AlertmanagerConfig resource. + deprecationWarning: The route and receiver resources are deprecated. Going forward, routes and receivers should not be managed as separate Kubernetes resources on this page. They should be configured as YAML fields in an AlertmanagerConfig resource. routeInfo: This form supports configuring one route that directs traffic to a receiver. 
Alerts can be directed to more receiver(s) by configuring child routes in YAML. receiverFormNames: create: Create Receiver in AlertmanagerConfig @@ -3577,34 +3577,34 @@ monitoring: grafana: Grafana prometheus: Prometheus projectMetrics: Project Metrics - v1Warning: 'Monitoring is currently deployed from Cluster Manager. If you are migrating from an older version of {vendor} with monitoring enabled, please disable monitoring in Cluster Manager before attempting to install the new {vendor} Monitoring chart in Cluster Explorer.' + v1Warning: 'Monitoring is currently deployed from cluster manager. If you are migrating from an older version of {vendor} with monitoring enabled, please disable monitoring in cluster manager before attempting to install the new {vendor} monitoring chart in cluster explorer.' monitoringReceiver: addButton: Add {type} custom: label: Custom - title: Custom Config - info: The YAML provided here will be directly appended to your receiver within the Alertmanager Config Secret. + title: Custom Configuration + info: The YAML provided here will be directly appended to your receiver within the Alertmanager configuration secret. email: label: Email - title: Email Config + title: Email Configuration opsgenie: label: Opsgenie - title: Opsgenie Config + title: Opsgenie Configuration pagerduty: label: PagerDuty - title: PagerDuty Config + title: PagerDuty Configuration info: "You can find additional info on creating an Integration Key for PagerDuty here." slack: label: Slack - title: Slack Config + title: Slack Configuration info: "You can find additional info on creating Incoming Webhooks for Slack here ." webhook: label: Webhook - title: Webhook Config - urlTooltip: For some webhooks this a url that points to the service DNS - modifyNamespace: If
rancher-alerting-driversdefault values were changed, please update the url below in the format http://<new_service_name>.<new_namespace>.svc.<port>/<path> - banner: To use MS Teams or SMS you will need to have at least one instance of
rancher-alerting-driversinstalled first. + title: Webhook Configuration + urlTooltip: For some webhooks this is a URL that points to the service DNS + modifyNamespace: If
rancher-alerting-driversdefault values were changed, please update the URL below in the format http://<new_service_name>.<new_namespace>.svc.<port>/<path> + banner: To use MS Teams or SMS, you will need to have at least one instance of
rancher-alerting-driversinstalled first. add: selectWebhookType: Select Webhook Type generic: Generic @@ -3639,7 +3639,7 @@ monitoringReceiver: label: Enable send resolved alerts alertmanagerConfigReceiver: - secretKeyId: Key Id from Secret + secretKeyId: Key ID from Secret name: Receiver Name addButton: Add Receiver receivers: Receivers @@ -3653,7 +3653,7 @@ monitoringRoute: label: Group By addGroupByLabel: Labels to Group Alerts By groupByTooltip: Add each label as a string in the format key:value. The special label ... will aggregate by all possible labels. If provided, the ... must be the only element in the list. - info: This is the top-level Route used by Alertmanager as the default destination for any Alerts that do not match any other Routes. This Route must exist and cannot be deleted. + info: This is the top-level route used by Alertmanager as the default destination for any alerts that do not match any other routes. This route must exist and cannot be deleted. interval: label: Group Interval matching: @@ -3805,7 +3805,7 @@ networkpolicy: ruleHint: Incoming traffic is only allowed from the configured sources portHint: Incoming traffic is only allowed to connect to the configured ports labelsAnnotations: - label: Labels & Annotations + label: Labels and Annotations rules: pod: Pod namespace: Namespace @@ -3836,12 +3836,12 @@ networkpolicy: namespaceSelector: label: Namespace Selector namespaceAndPodSelector: - label: Namespace/Pod Selector + label: Namespace and Pod Selector config: label: Configuration selectors: label: Selectors - hint: The NetworkPolicy is applied to the selected Pods + hint: The NetworkPolicy is applied to the selected pods matchingPods: matchesSome: |- {matched, plural, @@ -3893,8 +3893,8 @@ node: used: Used amount: "{used} of {total} {unit}" cpu: CPU - memory: MEMORY - pods: PODS + memory: Memory + pods: Pods diskPressure: Disk Pressure kubelet: kubelet memoryPressure: Memory Pressure @@ -4107,7 +4107,7 @@ persistentVolume: portals: 
add: Add Portal cinder: - label: Openstack Cinder Volume (Unsupported) + label: OpenStack Cinder Volume (Unsupported) volumeId: label: Volume ID placeholder: e.g. vol @@ -4192,7 +4192,7 @@ persistentVolume: label: Path on the Node placeholder: /mnt/disks/ssd1 mustBe: - label: The Path on the Node must be + label: The path on the node must be anything: 'Anything: do not check the target path' directory: A directory, or create if it does not exist file: A file, or create if it does not exist @@ -4255,8 +4255,8 @@ persistentVolumeClaim: source: label: Source options: - new: Use a Storage Class to provision a new Persistent Volume - existing: Use an existing Persistent Volume + new: Use a storage class to provision a new persistent volume + existing: Use an existing persistent volume expand: label: Expand notSupported: Storage class does not support volume expansion @@ -4267,8 +4267,8 @@ persistentVolumeClaim: requestStorage: Request Storage persistentVolume: Persistent Volume tooltips: - noStorageClass: You don't have permission to list Storage Classes, enter a name manually - noPersistentVolume: You don't have permission to list Persistent Volumes, enter a name manually + noStorageClass: You do not have permission to list storage classes, enter a name manually + noPersistentVolume: You do not have permission to list persistent volumes, enter a name manually customize: label: Customize accessModes: @@ -4312,10 +4312,10 @@ plugins: installing: Installing ... uninstalling: Uninstalling ... 
descriptions: - experimental: This Extension is marked as experimental - third-party: This Extension is provided by a Third-Party - built-in: This Extension is built-in - image: This Extension Image has been loaded manually + experimental: This extension is marked as experimental + third-party: This extension is provided by a third-party + built-in: This extension is built-in + image: This extension image has been loaded manually error: title: Error loading extension message: Could not load extension code @@ -4349,10 +4349,10 @@ plugins: requiresHost: 'Requires a host that matches "{mainHost}"' empty: all: Extensions are neither installed nor available - available: No Extensions available - installed: No Extensions installed - updates: No updates available for installed Extensions - images: No Extension Images installed + available: No extensions available + installed: No extensions installed + updates: No updates available for installed extensions + images: No extension images installed loadError: An error occurred loading the code for this extension helmError: "An error occurred installing the extension via Helm" manageRepos: Manage Repositories @@ -4392,7 +4392,7 @@ plugins: message: A repository with the name {repo} already exists success: title: "Imported Extension Catalog from: {name}" - message: Extension Catalog image was imported successfully + message: Extension catalog image was imported successfully headers: image: name: images @@ -4410,17 +4410,17 @@ plugins: install: label: Install title: Install Extension {name} - prompt: "Are you sure that you want to install this Extension?"
version: Version - warnNotCertified: Please ensure that you are aware of the risks of installing Extensions from untrusted authors + warnNotCertified: Please ensure that you are aware of the risks of installing extensions from untrusted authors update: label: Update title: Update Extension {name} - prompt: "Are you sure that you want to update this Extension?" + prompt: "Are you sure that you want to update this extension?" rollback: label: Rollback title: Rollback Extension {name} - prompt: "Are you sure that you want to rollback this Extension?" + prompt: "Are you sure that you want to rollback this extension?" uninstall: label: Uninstall title: "Uninstall Extension: {name}" @@ -4456,7 +4456,7 @@ plugins: remove: label: Disable Extension Support title: Disable Extension Support? - prompt: This will un-install the Helm charts that enable Extension support + prompt: This will un-install the Helm charts that enable extension support registry: official: title: Remove the Official Rancher Extensions Repository @@ -4486,7 +4486,7 @@ podSecurityAdmission: placeholder: 'Version (default: latest)' exemptions: title: Exemptions - description: Allow the creation of pods for specific Usernames, RuntimeClassNames, and Namespaces that would otherwise be prohibited due to the policies set above. + description: Allow the creation of pods for specific usernames, RuntimeClassNames, and namespaces that would otherwise be prohibited due to the policies set above. placeholder: Enter a comma separated list of {psaExemptionsControl} prefs: title: Preferences @@ -4582,7 +4582,7 @@ project: members: label: Members containerDefaultResourceLimit: Container Default Resource Limit - vmDefaultResourceLimit: VM Default Resource Limit + vmDefaultResourceLimit: Virtual Machine Default Resource Limit resourceQuotas: Resource Quotas haveOneOwner: There must be at least one member with the Owner role. 
@@ -4592,23 +4592,23 @@ projectMembers: label: Project projectPermissions: label: Project Permissions - description: Controls what access users have to the Project + description: Controls what access users have to the project noDescription: User created - no description searchForMember: Search for a member to provide project access owner: label: Owner - description: Owners have full control over the Project and all resources inside it. + description: Owners have full control over the project and all resources inside it. member: label: Member - description: Members can manage the resources inside the Project but not change the Project itself. + description: Members can manage the resources inside the project but not change the project itself. readOnly: label: Read Only - description: Members can only view the resources inside the Project but not change the resources. + description: Members can only view the resources inside the project but not change the resources. custom: label: Custom description: Choose individual roles for this user. createNs: Create Namespaces - configmapsManage: Manage Config Maps + configmapsManage: Manage Configuration Maps ingressManage: Manage Ingress projectcatalogsManage: Manage Project Catalogs projectroletemplatebindingsManage: Manage Project Members @@ -4653,7 +4653,7 @@ prometheusRule: summary: input: Summary Annotation Value label: Summary - bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured Receivers.' + bannerText: 'When firing alerts, the annotations and labels will be passed to the configured AlertManagers to allow them to construct the notification that will be sent to any configured receivers.' for: label: Wait to fire for placeholder: '60' @@ -4692,14 +4692,14 @@ prometheusRule: promptForceRemove: modalTitle: Are you sure? 
- removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. It's highly recommended to manually delete any referenced infrastructure." + removeWarning: "There was an issue with deleting underlying infrastructure. If you proceed with this action, the Machine {nameToMatch} will be deleted from Rancher only. We recommend manually deleting any referenced infrastructure." forceDelete: Force Delete confirmName: "Enter in the pool name below to confirm:" podRemoveWarning: "Force deleting pods does not wait for confirmation that the pod's processes have been terminated. This may result in data corruption or inconsistencies" promptScaleMachineDown: attemptingToRemove: "You are attempting to delete {count} {type}" - retainedMachine1: At least one Machine must exist for roles Control Plane and Etcd. + retainedMachine1: At least one machine must exist for roles control plane and Etcd. retainedMachine2: { name } will remain promptSlo: @@ -4720,7 +4720,7 @@ promptRemove: other { and {count} others.} } attemptingToRemove: "You are attempting to delete the {type}" - attemptingToRemoveAuthConfig: "You are attempting to disable this Auth Provider.
docker ps
, then run:'
dockerSuffix: ""
@@ -5633,7 +5633,7 @@ tableHeaders:
apiGroup: API Groups
apikey: API Key
available: Available
- attachedVM: Attached VM
+ attachedVM: Attached Virtual Machine
authRoles:
globalDefault: New User Default
@@ -5736,7 +5736,7 @@ tableHeaders:
namespaceName: Name
namespaceNameUnlinked: Name
networkType: Type
- networkVlan: Vlan ID
+ networkVlan: VLAN ID
node: Node
nodeName: Node Name
nodesReady: Nodes Ready
@@ -5950,7 +5950,7 @@ validation:
name: Cluster name cannot be 'local' or take the form 'c-xxxxx'
conflict: |-
This resource has been modified since you started editing it, and some of those modifications conflict with your changes.
- This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then Save again.
+ This screen has been updated to reflect the current values on the cluster. Review and reapply the changes you wanted to make, then save again.
Conflicting {fieldCount, plural, =1 {field} other {fields}}: {fields}
custom:
missing: 'No validator exists for { validatorName }! Does the validator exist in custom-validators? Is the name spelled correctly?'
@@ -5979,7 +5979,7 @@ validation:
global: Requires "Cluster Output" to be selected.
output:
logdna:
- apiKey: Required an "Api Key" to be set.
+ apiKey: Requires an "API Key" to be set.
invalidCron: Invalid cron schedule
invalidCidr: "Invalid CIDR"
invalidIP: "Invalid IP"
@@ -6021,21 +6021,22 @@ validation:
port: A port must be a number between 1 and 65535.
path: '"{key}" must be an absolute path'
prometheusRule:
- noEdit: This Prometheus Rule may not be edited due to invalid characters in name.
+ noEdit: This Prometheus rule may not be edited due to invalid characters in name.
groups:
required: At least one rule group is required.
singleAlert: A rule may contain alert rules or recording rules but not both.
valid:
name: 'Name is required for rule group {index}.'
rule:
- alertName: 'Rule group {groupIndex} rule {ruleIndex} requires a Alert Name.'
- expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL Expression.'
+ alertName: 'Rule group {groupIndex} rule {ruleIndex} requires an alert name.'
+ expr: 'Rule group {groupIndex} rule {ruleIndex} requires a PromQL expression.'
labels: 'Rule group {groupIndex} rule {ruleIndex} requires at least one label. Severity is recommended.'
- recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a Time Series Name.'
+ recordName: 'Rule group {groupIndex} rule {ruleIndex} requires a time series name.'
singleEntry: 'At least one alert rule or one recording rule is required in rule group {index}.'
required: '"{key}" is required'
invalid: '"{key}" is invalid'
requiredOrOverride: '"{key}" is required or must allow override'
+ arrayCountRequired: "At least {count} {key} {count, plural, =1 {is} other {are}} required, and {key} cannot be empty."
roleTemplate:
roleTemplateRules:
missingVerb: You must specify at least one verb for each resource grant
@@ -6045,7 +6046,7 @@ validation:
noResourceAndNonResource: Each rule may contain Resources or Non-Resource URLs but not both
service:
externalName:
- none: External Name is required on an ExternalName Service.
+ none: External name is required on an ExternalName service.
ports:
name:
required: 'Port Rule [{position}] - Name is required.'
@@ -6076,7 +6077,7 @@ validation:
missingProjectId: A target must have a project selected.
monitoring:
route:
- match: At least one Match or Match Regex must be selected
+ match: At least one match or match regex must be selected
interval: '"{key}" must be of a format with digits followed by a unit i.e. 1h, 2m, 30s'
tab: "One or more fields in this tab contain a form validation error"
@@ -6169,9 +6170,9 @@ workload:
initialDelay: Initial Delay
livenessProbe: Liveness Check
livenessTip: Containers will be restarted when this check is failing. Not recommended for most uses.
- noHealthCheck: "There is not a Readiness Check, Liveness Check or Startup Check configured."
+ noHealthCheck: "There is no readiness check, liveness check or startup check configured."
readinessProbe: Readiness Checks
- readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended.
+ readinessTip: Containers will be removed from service endpoints when this check is failing. Recommended.
startupProbe: Startup Check
startupTip: Containers will wait until this check succeeds before attempting other health checks.
successThreshold: Success Threshold
@@ -6239,9 +6240,9 @@ workload:
noServiceAccess: You do not have permission to create or manage services
ports:
expose: Networking
- description: 'Define a Service to expose the container, or define a non-functional, named port so that humans will know where the app within the container is expected to run.'
- detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a Service is automatically created that will select the Pods in this workload using labels.
- toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on Services. You can also manually create a Service to expose Pods by selecting their labels, and you can use an Ingress to map HTTP routes to Services.'
+ description: 'Define a service to expose the container, or define a non-functional, named port so that other users will know where the application within the container is expected to run.'
+ detailedDescription: If ClusterIP, LoadBalancer, or NodePort is selected, a service is automatically created that will select the pods in this workload using labels.
+ toolTip: 'For help exposing workloads on Kubernetes, see the official Kubernetes documentation on services. You can also manually create a service to expose pods by selecting their labels, and you can use an ingress to map HTTP routes to services.'
createService: Service Type
noCreateService: Do not create a service
containerPort: Private Container Port
@@ -6315,13 +6316,13 @@ workload:
detail:
services: Services
ingresses: Ingresses
- cannotViewServices: Could not list Services due to lack of permission.
- cannotFindServices: Could not find any Services that select Pods from this workload.
- serviceListCaption: "The following Services select Pods from this workload:"
- cannotViewIngresses: Could not list Ingresses due to lack of permission.
- cannotFindIngresses: Could not find any Ingresses that forward traffic to Services that select Pods in this workload.
- ingressListCaption: "The following Ingresses forward traffic to Services that select Pods from this workload:"
- cannotViewIngressesBecauseCannotViewServices: Could not find relevant relevant Ingresses due to lack of permission to view Services.
+ cannotViewServices: Could not list services due to lack of permission.
+ cannotFindServices: Could not find any services that select pods from this workload.
+ serviceListCaption: "The following services select pods from this workload:"
+ cannotViewIngresses: Could not list ingresses due to lack of permission.
+ cannotFindIngresses: Could not find any ingresses that forward traffic to services that select pods in this workload.
+ ingressListCaption: "The following ingresses forward traffic to services that select pods from this workload:"
+ cannotViewIngressesBecauseCannotViewServices: Could not find relevant ingresses due to lack of permission to view services.
pods:
title: Pods
detailTop:
@@ -6511,7 +6512,7 @@ workload:
addMount: Add Mount
addVolume: Add Volume
selectVolume: Select Volume
- noVolumes: Volumes will appear here after they are added in the Pod tab
+ noVolumes: Volumes will appear here after they are added in the pod tab
certificate: Certificate
csi:
diskName: Disk Name
@@ -6542,12 +6543,12 @@ workload:
defaultMode: Default Mode
driver: driver
hostPath:
- label: The Path on the Node must be
+ label: The Path on the node must be
options:
default: 'Anything: do not check the target path'
- directoryOrCreate: A directory, or create if it doesn't exist
+ directoryOrCreate: A directory, or create if it does not exist
directory: An existing directory
- fileOrCreate: A file, or create if it doesn't exist
+ fileOrCreate: A file, or create if it does not exist
file: An existing file
socket: An existing socket
charDevice: An existing character device
@@ -6576,11 +6577,11 @@ workload:
placeholder: "e.g. 300"
typeDescriptions:
apps.daemonset: DaemonSets run exactly one pod on every eligible node. When new nodes are added to the cluster, DaemonSets automatically deploy to them. Recommended for system-wide or vertically-scalable workloads that never need more than one pod per node.
- apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless & horizontally-scalable workloads.
+ apps.deployment: Deployments run a scalable number of replicas of a pod distributed among the eligible nodes. Changes are rolled out incrementally and can be rolled back to the previous revision when needed. Recommended for stateless and horizontally-scalable workloads.
apps.statefulset: StatefulSets manage stateful applications and provide guarantees about the ordering and uniqueness of the pods created. Recommended for workloads with persistent storage or strict identity, quorum, or upgrade order requirements.
- batch.cronjob: CronJobs create Jobs, which then run Pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC).
+ batch.cronjob: CronJobs create jobs, which then run pods, on a repeating schedule. The schedule is expressed in standard Unix cron format, and uses the timezone of the Kubernetes control plane (typically UTC).
batch.job: Jobs create one or more pods to reliably perform a one-time task by running a pod until it exits successfully. Failed pods are automatically replaced until the specified number of completed runs has been reached. Jobs can also run multiple pods in parallel or function as a batch work queue.
- pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A Pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers.
+ pod: Pods are the smallest deployable units of computing that you can create and manage in Kubernetes. A pod is a group of one or more containers, with shared storage and network resources, and a specification for how to run the containers.
upgrading:
activeDeadlineSeconds:
label: Pod Active Deadline
@@ -6589,8 +6590,8 @@ workload:
label: Concurrency
options:
allow: Allow CronJobs to run concurrently
- forbid: Skip next run if current run hasn't finished
- replace: Replace run if current run hasn't finished
+ forbid: Skip next run if current run has not finished
+ replace: Replace run if current run has not finished
maxSurge:
label: Max Surge
tip: The maximum number of pods allowed beyond the desired scale at any given time.
@@ -6612,7 +6613,7 @@ workload:
labels:
delete: "On Delete: New pods are only created when old pods are manually deleted."
recreate: "Recreate: Kill ALL pods, then start new pods."
- rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Don't stop more pods than max unavailable."
+ rollingUpdate: "Rolling Update: Create new pods, until max surge is reached, before deleting old pods. Do not stop more pods than max unavailable."
terminationGracePeriodSeconds:
label: Termination Grace Period
tip: The duration the pod needs to terminate successfully.
@@ -6710,24 +6711,24 @@ typeDescription:
cis.cattle.io.clusterscanprofile: A profile is the configuration for the CIS scan, which is the benchmark versions to use and any specific tests to skip in that benchmark.
cis.cattle.io.clusterscan: A scan is created to trigger a CIS scan on the cluster based on the defined profile. A report is created after the scan is completed.
cis.cattle.io.clusterscanreport: A report is the result of a CIS scan of the cluster.
- management.cattle.io.feature: Feature Flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality.
- cluster.x-k8s.io.machine: A Machine encapsulates the configuration of a Kubernetes Node. Use this view to see what happens after updating a cluster.
- cluster.x-k8s.io.machinedeployment: A Machine Deployment orchestrates deployments via templates over a collection of Machine Sets (similar to a Deployment). Use this view to see what happens after updating a cluster.
- cluster.x-k8s.io.machineset: A Machine Set ensures the desired number of Machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster.
+ management.cattle.io.feature: Feature flags allow certain {vendor} features to be toggled on and off. Features that are off by default should be considered experimental functionality.
+ cluster.x-k8s.io.machine: A machine encapsulates the configuration of a Kubernetes node. Use this view to see what happens after updating a cluster.
+ cluster.x-k8s.io.machinedeployment: A machine deployment orchestrates deployments via templates over a collection of machine sets (similar to a deployment). Use this view to see what happens after updating a cluster.
+ cluster.x-k8s.io.machineset: A machine set ensures the desired number of machine resources are up and running at all times (similar to a ReplicaSet). Use this view to see what happens after updating a cluster.
resources.cattle.io.backup: A backup is created to perform one-time backups or schedule recurring backups based on a ResourceSet.
resources.cattle.io.restore: A restore is created to trigger a restore to the cluster based on a backup file.
resources.cattle.io.resourceset: A resource set defines which CRDs and resources to store in the backup.
monitoring.coreos.com.servicemonitor: A service monitor defines the group of services and the endpoints that Prometheus will scrape for metrics. This is the most common way to define metrics collection.
- monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor wouldn't work.
- monitoring.coreos.com.prometheusrule: A Prometheus Rule resource defines both recording and/or alert rules. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
+ monitoring.coreos.com.podmonitor: A pod monitor defines the group of pods that Prometheus will scrape for metrics. The common way is to use service monitors, but pod monitors allow you to handle any situation where a service monitor would not work.
+ monitoring.coreos.com.prometheusrule: A Prometheus rule resource defines recording rules, alert rules, or both. A recording rule can pre-compute values and save the results. Alerting rules allow you to define conditions on when to send notifications to AlertManager.
monitoring.coreos.com.prometheus: A Prometheus server is a Prometheus deployment whose scrape configuration and rules are determined by selected ServiceMonitors, PodMonitors, and PrometheusRules and whose alerts will be sent to all selected Alertmanagers with the custom resource's configuration.
monitoring.coreos.com.alertmanager: An alert manager is deployment whose configuration will be specified by a secret in the same namespace, which determines which alerts should go to which receiver.
- node: The base Kubernetes Node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management.
+ node: The base Kubernetes node resource represents a virtual or physical machine which hosts deployments. To manage the machine lifecycle, if available, go to Cluster Management.
catalog.cattle.io.clusterrepo: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster.'
- catalog.cattle.io.clusterrepo.local: ' A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.'
+ catalog.cattle.io.clusterrepo.local: 'A chart repository is a Helm repository or {vendor} git based application catalog. It provides the list of available charts in the cluster. Cluster Templates are deployed via Helm charts.'
catalog.cattle.io.operation: An operation is the list of recent Helm operations that have been applied to the cluster.
catalog.cattle.io.app: An installed application is a Helm 3 chart that was installed either via our charts or through the Helm CLI.
- logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected Cluster Output.
+ logging.banzaicloud.io.clusterflow: Logs from the cluster will be collected and logged to the selected cluster output.
logging.banzaicloud.io.clusteroutput: A cluster output defines which logging providers that logs can be sent to and is only effective when deployed in the namespace that the logging operator is in.
logging.banzaicloud.io.flow: A flow defines which logs to collect and filter as well as which output to send the logs. The flow is a namespaced resource, which means logs will only be collected from the namespace that the flow is deployed in.
logging.banzaicloud.io.output: An output defines which logging providers that logs can be sent to. The output needs to be in the same namespace as the flow that is using it.
@@ -6761,8 +6762,8 @@ typeLabel:
}
catalog.cattle.io.app: |-
{count, plural,
- one { Installed App }
- other { Installed Apps }
+ one { Installed Application }
+ other { Installed Applications }
}
catalog.cattle.io.clusterrepo: |-
{count, plural,
@@ -6771,18 +6772,18 @@ typeLabel:
}
catalog.cattle.io.repo: |-
{count, plural,
- one { Namespaced Repo }
- other { Namespaced Repos }
+ one { Namespaced Repository }
+ other { Namespaced Repositories }
}
chartinstallaction: |-
{count, plural,
- one { App }
- other { Apps }
+ one { Application }
+ other { Applications }
}
chartupgradeaction: |-
{count, plural,
- one { App }
- other { Apps }
+ one { Application }
+ other { Applications }
}
cloudcredential: |-
{count, plural,
@@ -6816,8 +6817,8 @@ typeLabel:
}
fleet.cattle.io.gitrepo: |-
{count, plural,
- one { Git Repo }
- other {Git Repos }
+ one { Git Repository }
+ other { Git Repositories }
}
management.cattle.io.authconfig: |-
{count, plural,
@@ -6922,8 +6923,8 @@ typeLabel:
}
'management.cattle.io.cluster': |-
{count, plural,
- one { Mgmt Cluster }
- other { Mgmt Clusters }
+ one { Management Cluster }
+ other { Management Clusters }
}
'cluster.x-k8s.io.cluster': |-
{count, plural,
@@ -7102,8 +7103,8 @@ typeLabel:
}
harvesterhci.io.cloudtemplate: |-
{count, plural,
- one { Cloud Config Template }
- other { Cloud Config Templates }
+ one { Cloud Configuration Template }
+ other { Cloud Configuration Templates }
}
fleet.cattle.io.content: |-
{count, plural,
@@ -7122,8 +7123,8 @@ typeLabel:
}
k3s.cattle.io.addon: |-
{count, plural,
- one { Addon }
- other { Addons }
+ one { Add-on }
+ other { Add-ons }
}
management.cattle.io.apiservice: |-
{count, plural,
@@ -7342,7 +7343,7 @@ keyValue:
registryMirror:
header: Mirrors
- toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.'
+ toolTip: 'Mirrors can be used to redirect requests for images from one registry to come from a list of endpoints you specify instead. For example docker.io could redirect to your internal registry instead of ever going to DockerHub.'
addLabel: Add Mirror
description: Mirrors define the names and endpoints for private registries. The endpoints are tried one by one, and the first working one is used.
hostnameLabel: Registry Hostname
@@ -7390,12 +7391,12 @@ advancedSettings:
'cluster-defaults': 'Override RKE Defaults when creating new clusters.'
'engine-install-url': 'Default Docker engine installation URL (for most node drivers).'
'engine-iso-url': 'Default OS installation URL (for vSphere driver).'
- 'engine-newest-version': 'The newest supported version of Docker at the time of this release. A Docker version that does not satisfy supported docker range but is newer than this will be marked as untested.'
- 'engine-supported-range': 'Semver range for supported Docker engine versions. Versions which do not satisfy this range will be marked unsupported in the UI.'
- 'ingress-ip-domain': 'Wildcard DNS domain to use for automatically generated Ingress hostnames. auth-user-session-ttl-minutes
and auth-token-max-ttl-minutes
) in the Settings page.
+ information: To change the automatic logout behavior, edit the authorization and session token timeout values (auth-user-session-ttl-minutes
and auth-token-max-ttl-minutes
) in the settings page.
description: When enabled and the user is inactive past the specified timeout, the UI will no longer fresh page content and the user must reload the page to continue.
authUserTTL: This timeout cannot be higher than the user session timeout auth-user-session-ttl-minutes, which is currently {current} minutes.
serverPagination:
@@ -7714,8 +7715,8 @@ support:
text: Login to SUSE Customer Center to access support for your subscription
action: SUSE Customer Center
aws:
- generateConfig: Generate Support Config
- text: 'Login to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support config file below.'
+ generateConfig: Generate Support Configuration
+ text: 'Log in to SUSE Customer Center to access support for your subscription. Need to open a new support case? Download a support configuration file below.'
promos:
one:
title: 24x7 Support
@@ -7746,7 +7747,7 @@ legacy:
project:
label: Project
- select: "Use the Project/Namespace filter at the top of the page to select a Project in order to see legacy Project features."
+ select: "Use the namespace or project filter at the top of the page to select a project in order to see legacy project features."
serverUpgrade:
title: "{vendor} Server Changed"