From d3704743346e260a1397c715576ddade2674393b Mon Sep 17 00:00:00 2001
From: Kristian Aune
Date: Tue, 7 Jan 2025 08:30:24 +0100
Subject: [PATCH] Corrections from spellchecker

---
 .../ai/vespa/metrics/ClusterControllerMetrics.java    | 4 ++--
 .../java/ai/vespa/metrics/ConfigServerMetrics.java    | 2 +-
 .../main/java/ai/vespa/metrics/ContainerMetrics.java  | 4 ++--
 .../java/ai/vespa/metrics/SearchNodeMetrics.java      | 12 ++++++------
 .../main/java/ai/vespa/metrics/StorageMetrics.java    | 4 ++--
 metrics/src/main/java/ai/vespa/metrics/Unit.java      | 8 ++++----
 .../storage/distributor/distributormetricsset.cpp     | 4 ++--
 .../storageserver/tls_statistics_metrics_wrapper.cpp  | 4 ++--
 8 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java
index 577a7a48673c..58f993bfe4ab 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ClusterControllerMetrics.java
@@ -24,8 +24,8 @@ public enum ClusterControllerMetrics implements VespaMetrics {
     // DO NOT RELY ON THIS METRIC YET.
     NODE_EVENT_COUNT("cluster-controller.node-event.count", Unit.OPERATION, "Number of node events"),
     RESOURCE_USAGE_NODES_ABOVE_LIMIT("cluster-controller.resource_usage.nodes_above_limit", Unit.NODE, "The number of content nodes above resource limit, blocking feed"),
-    RESOURCE_USAGE_MAX_MEMORY_UTILIZATION("cluster-controller.resource_usage.max_memory_utilization", Unit.FRACTION, "Current memory utilisation, for content node with highest value"),
-    RESOURCE_USAGE_MAX_DISK_UTILIZATION("cluster-controller.resource_usage.max_disk_utilization", Unit.FRACTION, "Current disk space utilisation, for content node with highest value"),
+    RESOURCE_USAGE_MAX_MEMORY_UTILIZATION("cluster-controller.resource_usage.max_memory_utilization", Unit.FRACTION, "Current memory utilisation, for content node with the highest value"),
+    RESOURCE_USAGE_MAX_DISK_UTILIZATION("cluster-controller.resource_usage.max_disk_utilization", Unit.FRACTION, "Current disk space utilisation, for content node with the highest value"),
     RESOURCE_USAGE_MEMORY_LIMIT("cluster-controller.resource_usage.memory_limit", Unit.FRACTION, "Memory space limit as a fraction of available memory"),
     RESOURCE_USAGE_DISK_LIMIT("cluster-controller.resource_usage.disk_limit", Unit.FRACTION, "Disk space limit as a fraction of available disk space"),
     REINDEXING_PROGRESS("reindexing.progress", Unit.FRACTION, "Re-indexing progress");
diff --git a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
index 1879cb1ff2e7..e5cc71d9f5f7 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ConfigServerMetrics.java
@@ -98,7 +98,7 @@ public enum ConfigServerMetrics implements VespaMetrics {
     NUMBER_OF_SERVICES_NOT_CHECKED("numberOfServicesNotChecked", Unit.INSTANCE, "The number of services supposed to run on a node, that has not checked"),
     NUMBER_OF_SERVICES_DOWN("numberOfServicesDown", Unit.INSTANCE, "The number of services confirmed to not be running on a node"),
     SOME_SERVICES_DOWN("someServicesDown", Unit.BINARY, "One if one or more services has been confirmed to not run on a node, zero if not"),
-    NUMBER_OF_SERVICES_UNKNOWN("numberOfServicesUnknown", Unit.INSTANCE, "The number of services the config server does not know if is running on a node"),
+    NUMBER_OF_SERVICES_UNKNOWN("numberOfServicesUnknown", Unit.INSTANCE, "The number of services the config server does not know is running on a node"),
     NODE_FAILER_BAD_NODE("nodeFailerBadNode", Unit.BINARY, "One if the node is failed due to being bad, zero if not"),
     DOWN_IN_NODE_REPO("downInNodeRepo", Unit.BINARY, "One if the node is registered as being down in the node repository, zero if not"),
     NUMBER_OF_SERVICES("numberOfServices", Unit.INSTANCE, "Number of services supposed to run on a node"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java
index adab25768ec4..054a421bbb9e 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java
@@ -134,9 +134,9 @@ public enum ContainerMetrics implements VespaMetrics {
     QUERY_HIT_OFFSET("query_hit_offset", Unit.HIT, "The offset for hits returned"),
     DOCUMENTS_COVERED("documents_covered", Unit.DOCUMENT, "The combined number of documents considered during query evaluation"),
     DOCUMENTS_TOTAL("documents_total", Unit.DOCUMENT, "The number of documents to be evaluated if all requests had been fully executed"),
-    DOCUMENTS_TARGET_TOTAL("documents_target_total", Unit.DOCUMENT, "The target number of total documents to be evaluated when when all data is in sync"),
+    DOCUMENTS_TARGET_TOTAL("documents_target_total", Unit.DOCUMENT, "The target number of total documents to be evaluated when all data is in sync"),
     JDISC_RENDER_LATENCY("jdisc.render.latency", Unit.NANOSECOND, "The time used by the container to render responses"),
-    QUERY_ITEM_COUNT("query_item_count", Unit.ITEM, "The number of query items (terms, phrases, etc)"),
+    QUERY_ITEM_COUNT("query_item_count", Unit.ITEM, "The number of query items (terms, phrases, etc.)"),
 
     DOCPROC_PROC_TIME("docproc.proctime", Unit.MILLISECOND, "Time spent processing document"),
     DOCPROC_DOCUMENTS("docproc.documents", Unit.DOCUMENT, "Number of processed documents"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java b/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java
index 77a5e5c0dd12..a393c6e2ea45 100644
--- a/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/SearchNodeMetrics.java
@@ -36,27 +36,27 @@ public enum SearchNodeMetrics implements VespaMetrics {
     // Executors shared between all document dbs
     CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE("content.proton.executor.proton.queuesize", Unit.TASK, "Size of executor proton task queue"),
     CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED("content.proton.executor.proton.accepted", Unit.TASK, "Number of executor proton accepted tasks"),
-    CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS("content.proton.executor.proton.wakeups", Unit.WAKEUP, "Number of times a executor proton worker thread has been woken up"),
+    CONTENT_PROTON_EXECUTOR_PROTON_WAKEUPS("content.proton.executor.proton.wakeups", Unit.WAKEUP, "Number of times an executor proton worker thread has been woken up"),
     CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION("content.proton.executor.proton.utilization", Unit.FRACTION, "Ratio of time the executor proton worker threads has been active"),
     CONTENT_PROTON_EXECUTOR_PROTON_REJECTED("content.proton.executor.proton.rejected", Unit.TASK, "Number of rejected tasks"),
     CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE("content.proton.executor.flush.queuesize", Unit.TASK, "Size of executor flush task queue"),
     CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED("content.proton.executor.flush.accepted", Unit.TASK, "Number of accepted executor flush tasks"),
-    CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS("content.proton.executor.flush.wakeups", Unit.WAKEUP, "Number of times a executor flush worker thread has been woken up"),
+    CONTENT_PROTON_EXECUTOR_FLUSH_WAKEUPS("content.proton.executor.flush.wakeups", Unit.WAKEUP, "Number of times an executor flush worker thread has been woken up"),
     CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION("content.proton.executor.flush.utilization", Unit.FRACTION, "Ratio of time the executor flush worker threads has been active"),
     CONTENT_PROTON_EXECUTOR_FLUSH_REJECTED("content.proton.executor.flush.rejected", Unit.TASK, "Number of rejected tasks"),
     CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE("content.proton.executor.match.queuesize", Unit.TASK, "Size of executor match task queue"),
     CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED("content.proton.executor.match.accepted", Unit.TASK, "Number of accepted executor match tasks"),
-    CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS("content.proton.executor.match.wakeups", Unit.WAKEUP, "Number of times a executor match worker thread has been woken up"),
+    CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS("content.proton.executor.match.wakeups", Unit.WAKEUP, "Number of times an executor match worker thread has been woken up"),
     CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION("content.proton.executor.match.utilization", Unit.FRACTION, "Ratio of time the executor match worker threads has been active"),
     CONTENT_PROTON_EXECUTOR_MATCH_REJECTED("content.proton.executor.match.rejected", Unit.TASK, "Number of rejected tasks"),
     CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE("content.proton.executor.docsum.queuesize", Unit.TASK, "Size of executor docsum task queue"),
     CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED("content.proton.executor.docsum.accepted", Unit.TASK, "Number of executor accepted docsum tasks"),
-    CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS("content.proton.executor.docsum.wakeups", Unit.WAKEUP, "Number of times a executor docsum worker thread has been woken up"),
+    CONTENT_PROTON_EXECUTOR_DOCSUM_WAKEUPS("content.proton.executor.docsum.wakeups", Unit.WAKEUP, "Number of times an executor docsum worker thread has been woken up"),
     CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION("content.proton.executor.docsum.utilization", Unit.FRACTION, "Ratio of time the executor docsum worker threads has been active"),
     CONTENT_PROTON_EXECUTOR_DOCSUM_REJECTED("content.proton.executor.docsum.rejected", Unit.TASK, "Number of rejected tasks"),
     CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE("content.proton.executor.shared.queuesize", Unit.TASK, "Size of executor shared task queue"),
     CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED("content.proton.executor.shared.accepted", Unit.TASK, "Number of executor shared accepted tasks"),
-    CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS("content.proton.executor.shared.wakeups", Unit.WAKEUP, "Number of times a executor shared worker thread has been woken up"),
+    CONTENT_PROTON_EXECUTOR_SHARED_WAKEUPS("content.proton.executor.shared.wakeups", Unit.WAKEUP, "Number of times an executor shared worker thread has been woken up"),
     CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION("content.proton.executor.shared.utilization", Unit.FRACTION, "Ratio of time the executor shared worker threads has been active"),
     CONTENT_PROTON_EXECUTOR_SHARED_REJECTED("content.proton.executor.shared.rejected", Unit.TASK, "Number of rejected tasks"),
     CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE("content.proton.executor.warmup.queuesize", Unit.TASK, "Size of executor warmup task queue"),
@@ -66,7 +66,7 @@ public enum SearchNodeMetrics implements VespaMetrics {
     CONTENT_PROTON_EXECUTOR_WARMUP_REJECTED("content.proton.executor.warmup.rejected", Unit.TASK, "Number of rejected tasks"),
     CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE("content.proton.executor.field_writer.queuesize", Unit.TASK, "Size of executor field writer task queue"),
     CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED("content.proton.executor.field_writer.accepted", Unit.TASK, "Number of accepted executor field writer tasks"),
-    CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS("content.proton.executor.field_writer.wakeups", Unit.WAKEUP, "Number of times a executor field writer worker thread has been woken up"),
+    CONTENT_PROTON_EXECUTOR_FIELD_WRITER_WAKEUPS("content.proton.executor.field_writer.wakeups", Unit.WAKEUP, "Number of times an executor field writer worker thread has been woken up"),
     CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION("content.proton.executor.field_writer.utilization", Unit.FRACTION, "Ratio of time the executor fieldwriter worker threads has been active"),
     CONTENT_PROTON_EXECUTOR_FIELD_WRITER_SATURATION("content.proton.executor.field_writer.saturation", Unit.FRACTION, "Ratio indicating the max saturation of underlying worker threads. A higher saturation than utilization indicates a bottleneck in one of the worker threads."),
     CONTENT_PROTON_EXECUTOR_FIELD_WRITER_REJECTED("content.proton.executor.field_writer.rejected", Unit.TASK, "Number of rejected tasks"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java b/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
index d474a85a1ec1..f2ade56b82cd 100644
--- a/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/StorageMetrics.java
@@ -227,8 +227,8 @@ public enum StorageMetrics implements VespaMetrics {
     VDS_VISITOR_CV_SKIPQUEUE("vds.visitor.cv_skipqueue", Unit.OPERATION, "Number of times we could skip queue as we had free visitor spots"),
 
     // C++ capability metrics
-    VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED("vds.server.network.rpc-capability-checks-failed", Unit.FAILURE, "Number of RPC operations that failed to due one or more missing capabilities"),
-    VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED("vds.server.network.status-capability-checks-failed", Unit.FAILURE, "Number of status page operations that failed to due one or more missing capabilities"),
+    VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED("vds.server.network.rpc-capability-checks-failed", Unit.FAILURE, "Number of RPC operations that failed due to one or more missing capabilities"),
+    VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED("vds.server.network.status-capability-checks-failed", Unit.FAILURE, "Number of status page operations that failed due to one or more missing capabilities"),
 
     // C++ Fnet metrics
     VDS_SERVER_FNET_NUM_CONNECTIONS("vds.server.fnet.num-connections", Unit.CONNECTION, "Total number of connection objects");
diff --git a/metrics/src/main/java/ai/vespa/metrics/Unit.java b/metrics/src/main/java/ai/vespa/metrics/Unit.java
index ef57ae120ac3..883c7992146f 100644
--- a/metrics/src/main/java/ai/vespa/metrics/Unit.java
+++ b/metrics/src/main/java/ai/vespa/metrics/Unit.java
@@ -18,13 +18,13 @@ public enum Unit {
     DOLLAR_PER_HOUR(BaseUnit.DOLLAR, BaseUnit.HOUR, "Total current cost of the cluster in $/hr"),
     FAILURE(BaseUnit.FAILURE, "Failures, typically for requests, operations or nodes"),
     FILE(BaseUnit.FILE, "Data file stored on the disk on a node"),
-    FRACTION(BaseUnit.FRACTION, "A value in the range [0..1]. Higher values can occur for some metrics, but would indicate the value is outside of the allowed range."),
-    GENERATION(BaseUnit.GENERATION,"Typically generation of configuration or application package"),
+    FRACTION(BaseUnit.FRACTION, "A value in the range [0..1]. Higher values can occur for some metrics, but would indicate the value is outside the allowed range."),
+    GENERATION(BaseUnit.GENERATION,"Typically, generation of configuration or application package"),
     GIGABYTE(BaseUnit.GIGABYTE,"One billion bytes"),
     HIT(BaseUnit.HIT, "Document that meets the filtering/restriction criteria specified by a given query"),
     HIT_PER_QUERY(BaseUnit.HIT, BaseUnit.QUERY, "Number of hits per query over a period of time"),
     HOST(BaseUnit.HOST, "Bare metal computer that contain nodes"),
-    INSTANCE(BaseUnit.INSTANCE, "Typically tenant or application"),
+    INSTANCE(BaseUnit.INSTANCE, "Typically, tenant or application"),
     ITEM(BaseUnit.ITEM, "Object or unit maintained in e.g. a queue"),
     MILLISECOND(BaseUnit.MILLISECOND, "Millisecond, 1/1000 of a second"),
     NANOSECOND(BaseUnit.NANOSECOND, "Nanosecond, 1/1000.000.000 of a second"),
@@ -32,7 +32,7 @@ public enum Unit {
     PACKET(BaseUnit.PACKET, "Collection of data transmitted over the network as a single unit"),
     OPERATION(BaseUnit.OPERATION, "A clearly defined task"),
     OPERATION_PER_SECOND(BaseUnit.OPERATION, BaseUnit.SECOND, "Number of operations per second"),
-    PERCENTAGE(BaseUnit.PERCENTAGE, "A number expressed as a fraction of 100. Typically in the range [0..100]."),
+    PERCENTAGE(BaseUnit.PERCENTAGE, "A number expressed as a fraction of 100, normally in the range [0..100]."),
     QUERY(BaseUnit.QUERY, "A request for matching, grouping and/or scoring documents stored in Vespa"),
     QUERY_PER_SECOND(BaseUnit.QUERY, BaseUnit.SECOND, "Number of queries per second."),
     RECORD(BaseUnit.RECORD, "A collection of information, typically a set of key/value, e.g. stored in a transaction log"),
diff --git a/storage/src/vespa/storage/distributor/distributormetricsset.cpp b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
index 4ed8cf2e5025..753d998cda06 100644
--- a/storage/src/vespa/storage/distributor/distributormetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/distributormetricsset.cpp
@@ -34,10 +34,10 @@ DistributorMetricSet::DistributorMetricSet()
           "time is counted as part of the total time spent for the final, "
           "completed state transition", this),
       set_cluster_state_processing_time("set_cluster_state_processing_time", {},
-          "Elapsed time where the distributor thread is blocked on processing "
+          "Elapsed time in which the distributor thread is blocked on processing "
           "its bucket database upon receiving a new cluster state", this),
       activate_cluster_state_processing_time("activate_cluster_state_processing_time", {},
-          "Elapsed time where the distributor thread is blocked on merging pending "
+          "Elapsed time in which the distributor thread is blocked on merging pending "
           "bucket info into its bucket database upon activating a cluster state", this),
       recoveryModeTime("recoverymodeschedulingtime", {},
           "Time spent scheduling operations in recovery mode "
diff --git a/storage/src/vespa/storage/storageserver/tls_statistics_metrics_wrapper.cpp b/storage/src/vespa/storage/storageserver/tls_statistics_metrics_wrapper.cpp
index 1615c8a6bd16..1f23ac9f2590 100644
--- a/storage/src/vespa/storage/storageserver/tls_statistics_metrics_wrapper.cpp
+++ b/storage/src/vespa/storage/storageserver/tls_statistics_metrics_wrapper.cpp
@@ -28,9 +28,9 @@ TlsStatisticsMetricsWrapper::TlsStatisticsMetricsWrapper(metrics::MetricSet* own
       failed_tls_config_reloads("failed-tls-config-reloads", {}, "Number of times "
           "background reloading of TLS config has failed", this),
       rpc_capability_checks_failed("rpc-capability-checks-failed", {},
-          "Number of RPC operations that failed to due one or more missing capabilities", this),
+          "Number of RPC operations that failed due to one or more missing capabilities", this),
       status_capability_checks_failed("status-capability-checks-failed", {},
-          "Number of status page operations that failed to due one or more missing capabilities", this),
+          "Number of status page operations that failed due to one or more missing capabilities", this),
       last_client_stats_snapshot(),
       last_server_stats_snapshot(),
       last_config_stats_snapshot(),